当前位置: 首页 > article >正文

k8s创建es和kibana

前提条件:已经安装了 docker 并配置好了 k8s 集群,前面的文章有配置 k8s 的详细说明。
1、创建动态申请存储的声明

vi sc.yaml
#输入以下内容

# sc.yaml — StorageClass for dynamic NFS provisioning of ES data volumes.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: es-nfs-storage
# Must match the PROVISIONER_NAME env var of the nfs provisioner Deployment.
provisioner: yixiu
parameters:
  # On PVC deletion, move data to an archived-* directory instead of deleting it.
  archiveOnDelete: "true"
# Keep the PV (and its data) when the claim is released.
reclaimPolicy: Retain
allowVolumeExpansion: True

2、角色授权及绑定(RBAC)

vi rbac.yaml
#输入以下内容

# rbac.yaml — ServiceAccount plus the RBAC rules the NFS provisioner needs:
# cluster-wide access to PVs/PVCs/StorageClasses/events, and a namespaced
# Role for the endpoints-based leader-election lock.
# (Fix: the original article accidentally pasted the sc.yaml StorageClass
# and a shell prompt line here before the real rbac.yaml content.)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: es-nfs-client-provisioner
  namespace: default

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: es-nfs-client-provisioner-runner
rules:
# Create/delete PVs as claims are provisioned and released.
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
# Record provisioning events on objects.
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-es-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: es-nfs-client-provisioner
  namespace: default
roleRef:
  kind: ClusterRole
  name: es-nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
# Namespaced permissions for the provisioner's leader-election lock.
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-es-nfs-client-provisioner
  namespace: default
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-es-nfs-client-provisioner
  namespace: default
subjects:
- kind: ServiceAccount
  name: es-nfs-client-provisioner
  namespace: default
roleRef:
  kind: Role
  name: leader-locking-es-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

3、部署 nfs 控制器

vi deploy.yaml
#输入以下内容

# deploy.yaml — the nfs-subdir-external-provisioner controller that watches
# PVCs referencing the es-nfs-storage class and creates subdirectories on the
# NFS export for each provisioned volume.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: es-nfs-client-provisioner
  labels:
    app: es-nfs-client-provisioner
  namespace: default
spec:
  replicas: 1
  strategy:
    # Recreate: stop the old pod before starting a new one, so two
    # provisioner instances never run concurrently.
    type: Recreate
  selector:
    matchLabels:
      app: es-nfs-client-provisioner
  template:
    metadata:
      labels:
        app: es-nfs-client-provisioner
    spec:
      # ServiceAccount bound by rbac.yaml.
      serviceAccountName: es-nfs-client-provisioner
      containers:
      - name: es-nfs-client-provisioner
        image: easzlab/nfs-subdir-external-provisioner:v4.0.1
        # Image pull policy — choose according to your network speed.
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
        # Must match the `provisioner:` field of the StorageClass.
        - name: PROVISIONER_NAME
          value: yixiu
        - name: NFS_SERVER
          value: 192.168.2.226
        - name: NFS_PATH
          value: /home/nfs/es
      volumes:
      # Same NFS export mounted into the controller so it can create
      # per-volume subdirectories.
      - name: nfs-client-root
        nfs:
          server: 192.168.2.226
          path: /home/nfs/es

4、创建es的服务

vi svc.yaml
#输入以下内容
# svc.yaml — Service fronting the ES StatefulSet; NodePort exposes the REST
# API outside the cluster, and its name is referenced by the StatefulSet's
# `serviceName` for stable per-pod DNS.
apiVersion: v1
kind: Service
metadata:
  name: es-cluster-svc
  namespace: default
spec:
  selector:
    app: es
  type: NodePort
  ports:
  - name: restful
    # ES HTTP/REST port, reachable on every node at :32000.
    port: 9200
    targetPort: 9200
    nodePort: 32000

5、部署es

vi es.yaml
#输入以下内容
# es.yaml — 3-node Elasticsearch 7.8 StatefulSet with per-pod PVCs
# dynamically provisioned from the es-nfs-storage StorageClass.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster
  namespace: default
spec:
  # Governing service (svc.yaml) that gives pods stable DNS names:
  # es-cluster-<ordinal>.es-cluster-svc
  serviceName: es-cluster-svc
  replicas: 3
  selector:
    matchLabels:
      app: es
  template:
    metadata:
      labels:
        app: es
    spec:
      initContainers:
      # Elasticsearch requires vm.max_map_count >= 262144 on the host kernel.
      - name: increase-vm-max-map
        image: busybox:1.32
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
      # NOTE(review): this only raises the fd limit inside the short-lived
      # init container's own shell; it does not carry over to the ES
      # container. Harmless but likely ineffective — verify.
      - name: increase-fd-ulimit
        image: busybox:1.32
        command: ["sh", "-c", "ulimit -n 65536"]
        securityContext:
          privileged: true
      containers:
      - name: es-container
        image: elasticsearch:7.8.0
        ports:
        - name: restful
          containerPort: 9200
          protocol: TCP
        - name: internal
          containerPort: 9300
          protocol: TCP
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        env:
        - name: cluster.name
          value: es-prod
        # Node name derived from the pod's metadata.name.
        - name: node.name
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        # Nodes eligible to bootstrap the first master election;
        # pod names are <statefulset-name>-<ordinal>, ordinals start at 0.
        - name: cluster.initial_master_nodes
          value: "es-cluster-0,es-cluster-1,es-cluster-2"
        # Deprecated and ignored on ES 7.x (zen discovery removed);
        # kept only to match the original article.
        - name: discovery.zen.minimum_master_nodes
          value: "2"
        # Per-pod DNS names provided by the governing service.
        - name: discovery.seed_hosts
          value: "es-cluster-0.es-cluster-svc,es-cluster-1.es-cluster-svc,es-cluster-2.es-cluster-svc"
        - name: ES_JAVA_OPTS
          value: "-Xms1g -Xmx1g"
        - name: network.host
          value: "0.0.0.0"

  volumeClaimTemplates:
  - metadata:
      name: data
      labels:
        app: es-volume
      # FIX: removed `namespace: defalut` — besides the typo ("defalut"),
      # namespace is not a valid field here; PVCs always inherit the
      # StatefulSet's namespace.
    spec:
      # Volume can be mounted read-write by a single node.
      accessModes:
      - "ReadWriteOnce"
      storageClassName: es-nfs-storage
      resources:
        requests:
          storage: 4Gi

6、部署kibana

vi kibana.yaml
#输入以下内容
# kibana.yaml — Kibana 7.8 Deployment plus a NodePort Service exposing the
# UI on every node at :30601.
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: default
  labels:
    app: kibana
spec:
  type: NodePort
  ports:
  - port: 5601
    nodePort: 30601
    targetPort: 5601
  selector:
    app: kibana

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: default
  labels:
    app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
      - name: kibana
        # Version must match the Elasticsearch version (7.8.0).
        image: kibana:7.8.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
        # Points Kibana at the in-cluster ES service from svc.yaml.
        - name: ELASTICSEARCH_HOSTS
          value: http://es-cluster-svc:9200
        ports:
        - containerPort: 5601
#部署命令:
#按照创建的命令依次执行
kubectl apply -f 创建的yaml文件

http://www.kler.cn/a/37888.html

相关文章:

  • 【NOSQL】redis哨兵模式、集群搭建
  • 天池大赛中药说明书实体识别挑战冠军方案开源(二)部署运行实战 附详细操作说明
  • 从小白到大神之路之学习运维第58天--------Firewalld防火墙
  • RabbitMQ ---- 延迟队列
  • MyBatis入门案列
  • ClassLoader源码阅读
  • 网络安全与密码学
  • php-fpm 如何进程守护,保证进程不死
  • 杂记:逆向一块FPGA核心板
  • 【消息队列设计总结】RabbitMQ、Kafka、RocketMQ对比之运行架构
  • node使用fluent-ffmpeg把webm格式的音频转成mp3
  • ARM Coresight 系列文章 7 - ARM Coresight 通过 AHB-AP 访问 cpu 内部 coresight 组件
  • 【SpringBoot笔记33】SpringBoot中使用@Async注解 + Future实现异步操作并获取返回值
  • dede后台验证码错误或不显示的解决办法
  • 深入理解React与闭包的关系
  • 搭建Promethues + grafana +alertManager+blakbox 监控springboot 健康和接口情况
  • 二层、三层交换机是什么?有什么区别?
  • C++【哈希表的完善及封装】
  • 谷歌Bard更新中文支持;GPT-4:1.8万亿参数、混合专家模型揭秘; Meta推出商用版本AI模型
  • 【课程介绍】OpenCV 基础入门教程:图像读取、显示、保存,图像处理和增强(如滤波、边缘检测、图像变换),特征提取和匹配,目标检测和跟踪