Deploying a Kafka cluster manually with Helm
1. On the target node, create the directories that the local PVs will mount (skip this step if distributed storage is available):
mkdir -p /data/kafka-data-0
mkdir -p /data/kafka-data-1
mkdir -p /data/kafka-data-2
mkdir -p /data/kafka-zookeeper-data-0
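The PVs defined in the next step are pinned to this node through its kubernetes.io/hostname label, so it is worth confirming the label value really is node-205 (the node name used throughout this guide) before continuing:

kubectl get node node-205 -L kubernetes.io/hostname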
2. Create the PVs and PVCs. Save the manifest below as kafka-pvc.yaml:
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kafka-pv-0
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/kafka-data-0
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node-205
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: data-kafka-22716-0
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-storage
  volumeMode: Filesystem
  volumeName: kafka-pv-0
  resources:
    requests:
      storage: 50Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kafka-pv-1
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/kafka-data-1
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node-205
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: data-kafka-22716-1
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-storage
  volumeMode: Filesystem
  volumeName: kafka-pv-1
  resources:
    requests:
      storage: 50Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kafka-pv-2
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/kafka-data-2
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node-205
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: data-kafka-22716-2
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-storage
  volumeMode: Filesystem
  volumeName: kafka-pv-2
  resources:
    requests:
      storage: 50Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kafka-zookeeper-pv-0
spec:
  capacity:
    storage: 50Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/kafka-zookeeper-data-0
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node-205
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: data-kafka-22716-zookeeper-0
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-storage
  volumeMode: Filesystem
  volumeName: kafka-zookeeper-pv-0
  resources:
    requests:
      storage: 50Gi
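The manifests above all reference storageClassName: local-storage. If the cluster does not already define a StorageClass with that name, a minimal no-provisioner class such as the following sketch (only the name has to match) can be appended to the same file. With WaitForFirstConsumer the PVCs bind only once the Kafka and ZooKeeper pods are scheduled, which is the mode recommended for local volumes:

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer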
kubectl apply -f kafka-pvc.yaml -n default
Note: kafka-22716 is the release name used by the helm install in the next step. If you install under a different release name, the PVC names above must be updated to match, since StatefulSet claims are named <template>-<statefulset>-<ordinal> (hence data-kafka-22716-0 and data-kafka-22716-zookeeper-0).
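After applying, list the volumes and claims to confirm they were created as expected (with a WaitForFirstConsumer StorageClass the PVCs may stay Pending until the pods are scheduled in the next step):

kubectl get pv | grep kafka
kubectl -n default get pvc | grep kafka-22716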
3. Run the Helm install
helm -n default install kafka-22716 bitnami/kafka \
  --set externalAccess.enabled=true \
  --set externalAccess.service.type=NodePort \
  --set externalAccess.autoDiscovery.enabled=true \
  --set serviceAccount.create=true \
  --set rbac.create=true \
  --set replicaCount=3 \
  --set auth.clientProtocol=sasl \
  --set auth.sasl.jaas.clientUsers=user \
  --set auth.sasl.jaas.clientPasswords=user@123 \
  --set auth.sasl.interBrokerMechanism=PLAIN \
  --set auth.sasl.mechanisms="PLAIN\,SCRAM-SHA-256\,SCRAM-SHA-512" \
  --set image.registry=harbor.test.com \
  --set image.repository=cmq/kafka \
  --set image.tag=2.4.1-debian-10-r43 \
  --set zookeeper.image.registry=harbor.test.com \
  --set zookeeper.image.repository=cmq/zookeeper \
  --set zookeeper.image.tag=3.7.0-debian-10-r106 \
  --set externalAccess.autoDiscovery.image.registry=harbor.test.com \
  --set externalAccess.autoDiscovery.image.repository=cmq/kubesphere-kubectl \
  --set externalAccess.autoDiscovery.image.tag=v1.91.1 \
  --set nodeSelector="kubernetes.io/hostname: node-205"
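The brokers and ZooKeeper take a short while to start. A simple way to watch them come up, assuming the standard app.kubernetes.io/instance label applied by the chart:

kubectl -n default get pods -l app.kubernetes.io/instance=kafka-22716 -w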
4. Get the Kafka NodePort
kubectl -n default get svc | grep kafka
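With the NodePort in hand, external access can be smoke-tested from any machine that has the Kafka command-line tools and can reach node-205. This is a sketch: <node-ip> and <kafka-nodeport> are placeholders for the node address and the port reported above, and it assumes the SASL_PLAINTEXT listener with the PLAIN mechanism and the user / user@123 credentials set during the install:

cat > client.properties <<'EOF'
security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="user" password="user@123";
EOF

kafka-console-producer.sh --broker-list <node-ip>:<kafka-nodeport> --topic test --producer.config client.properties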