
17. ELK

Installing ELKFK with Helm (Kafka reachable from outside the cluster)

Data flow: Filebeat -> Kafka -> Logstash -> Elasticsearch/Kibana

Deployment order:

1. elasticsearch
2. kibana
3. kafka
4. logstash
5. filebeat

kubectl create ns elk

Deploying ELKFK with Helm 3

1. elasticsearch
helm repo add elastic https://helm.elastic.co

helm repo list

helm repo update
helm search repo elastic/elasticsearch

cd && helm pull elastic/elasticsearch --untar --version 7.17.3

cd elasticsearch
cat > values-prod.yaml << EOF
# Cluster name
clusterName: "elasticsearch"
# Elasticsearch 6.8+ installs the x-pack plugin by default; some features are free, but we disable it here
image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "7.17.3"
imagePullPolicy: "IfNotPresent"

esConfig:
  elasticsearch.yml: |
    network.host: 0.0.0.0
    cluster.name: "elasticsearch"
    xpack.security.enabled: false
resources:
  limits:
    cpu: "2"
    memory: "4Gi"
  requests:
    cpu: "1"
    memory: "2Gi"
volumeClaimTemplate:
  storageClassName: "nfs-storage"
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 2Ti
service:
  type: NodePort
  port: 9200
  nodePort: 31311
EOF

Setting xpack.security.enabled: false also silences the Kibana security banner ("Elasticsearch built-in security features are not enabled").

helm upgrade --install --namespace elk es -f ./values-prod.yaml .
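
Wait for the pods to become ready before testing (a quick check; the statefulset name elasticsearch-master follows from the chart's clusterName/nodeGroup defaults):

kubectl rollout status statefulset/elasticsearch-master -n elk --timeout=10m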

Verify:

curl 192.168.1.200:31311/_cat/health

curl 192.168.1.200:31311/_cat/nodes
2. kibana
helm search repo elastic/kibana

cd && helm pull elastic/kibana --untar --version 7.17.3

cd kibana
cat > values-prod.yaml << 'EOF'
kibanaConfig:
  kibana.yml: |
    server.port: 5601
    server.host: "0.0.0.0"
    elasticsearch.hosts: [ "http://elasticsearch-master-headless:9200" ]
    i18n.locale: "zh-CN"
resources:
  limits:
    cpu: "2"
    memory: "2Gi"
  requests:
    cpu: "1"
    memory: "1Gi"
service:
  #type: ClusterIP
  type: NodePort
  loadBalancerIP: ""
  port: 5601
  nodePort: 30026
EOF
helm upgrade --install --namespace elk kibana -f ./values-prod.yaml .
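
A quick readiness check (the pod label and service name below assume the chart's default naming for a release called kibana):

kubectl get pods -n elk -l app=kibana
kubectl get svc -n elk kibana-kibana
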
cat > ~/kibana/kibana-Ingress.yml << 'EOF'
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana-ingress
  namespace: elk
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: 'true'
    nginx.ingress.kubernetes.io/proxy-body-size: '4G'
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: kibana-auth-secret
    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - admin'
spec:
  ingressClassName: nginx
  rules:
  - host: kibana.huanghuanhui.cloud
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kibana-kibana
            port:
              number: 5601
  tls:
  - hosts:
    - kibana.huanghuanhui.cloud
    secretName: kibana-ingress-tls
EOF
yum -y install httpd-tools

cd ~/kibana && htpasswd -bc auth admin Admin@2024

kubectl create secret generic kibana-auth-secret --from-file=auth -n elk
kubectl create secret -n elk \
tls kibana-ingress-tls \
--key=/root/ssl/huanghuanhui.cloud.key \
--cert=/root/ssl/huanghuanhui.cloud.crt
kubectl apply -f ~/kibana/kibana-Ingress.yml 
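
To verify the ingress and basic auth end to end (assuming DNS or /etc/hosts points kibana.huanghuanhui.cloud at the ingress controller):

curl -kI -u admin:Admin@2024 https://kibana.huanghuanhui.cloud/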

URL: kibana.huanghuanhui.cloud

Credentials: admin / Admin@2024

http://192.168.1.201:30026/app/dev_tools#/console

GET _cat/nodes

GET _cat/health

GET _cat/indices
3. kafka (Kafka cluster on k8s, reachable from outside)
mkdir -p ~/kafka-yml && cd ~/kafka-yml
cat > ~/kafka-yml/zk.yml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  labels:
    app: zookeeper-cluster
  namespace: elk
  name: zookeeper-cluster
spec:
  selector:
    app: zookeeper-cluster
  ports:
    - name: client
      port: 2181
      targetPort: 2181
    - name: follower
      port: 2888
      targetPort: 2888
    - name: leader
      port: 3888
      targetPort: 3888
  clusterIP: None
---
apiVersion: v1
kind: Service
metadata:
  namespace: elk
  name: zookeeper-cs
spec:
  selector:
    app: zookeeper-cluster
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 30152
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: elk
  name: crs-zookeeper
spec:
  replicas: 3
  podManagementPolicy: Parallel
  serviceName: zookeeper-cluster
  selector:
    matchLabels:
      app: zookeeper-cluster
  template:
    metadata:
      labels:
        component: zookeeper-cluster
        app: zookeeper-cluster
    spec:
      containers:
        - name: zookeeper
          image: bitnami/zookeeper:3.8.2
          imagePullPolicy: IfNotPresent
          securityContext:
            runAsUser: 0
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
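          # postStart writes this replica's ZooKeeper myid, derived from the pod ordinal found in /etc/hosts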
          lifecycle:
            postStart:
              exec:
                command:
                  - "sh"
                  - "-c"
                  - >
                    echo $(( $(cat /etc/hosts | grep zookeeper | awk '{print($3)}' | awk '{split($0,array,"-")} END{print array[3]}') + 1 )) > /bitnami/zookeeper/data/myid
          env:
            - name: ALLOW_ANONYMOUS_LOGIN
              value: "yes"
            - name: ZOO_SERVERS
              value:  crs-zookeeper-0.zookeeper-cluster.elk.svc.cluster.local:2888:3888,crs-zookeeper-1.zookeeper-cluster.elk.svc.cluster.local:2888:3888,crs-zookeeper-2.zookeeper-cluster.elk.svc.cluster.local:2888:3888
          volumeMounts:
            - name: zoodata-outer
              mountPath: /bitnami/zookeeper
  volumeClaimTemplates:
    - metadata:
        name: zoodata-outer
      spec:
        storageClassName: nfs-storage
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: 2Ti
EOF
kubectl apply -f ~/kafka-yml/zk.yml
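
A minimal ensemble check; zkServer.sh ships in the Bitnami image, and exactly one node should report leader with the other two as followers:

for i in 0 1 2; do
  kubectl exec -n elk crs-zookeeper-$i -- zkServer.sh status
done
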
cat > ~/kafka-yml/kafka.yml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  namespace: elk
  name: kafka-headless
spec:
  selector:
    app: kafka-cluster
  ports:
    - name: client
      port: 9092
      targetPort: 9092
  clusterIP: None
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-0
  namespace: elk
  labels:
    app: kafka-cluster
spec:
  ports:
    - port: 9092
      targetPort: 9092
      nodePort: 30127
      name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: crs-kafka-0
#    app: kafka-cluster
 
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-1
  namespace: elk
  labels:
    app: kafka-cluster
spec:
  ports:
    - port: 9092
      targetPort: 9092
      nodePort: 30128
      name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: crs-kafka-1
 
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-2
  namespace: elk
  labels:
    app: kafka-cluster
spec:
  ports:
    - port: 9092
      targetPort: 9092
      nodePort: 30129
      name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: crs-kafka-2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: elk
  name: crs-kafka
spec:
  replicas: 3
  podManagementPolicy: Parallel
  serviceName: kafka-cluster
  selector:
    matchLabels:
      app: kafka-cluster
  template:
    metadata:
      labels:
        app: kafka-cluster
    spec:
      hostname: kafka
      containers:
        - name: kafka
          command:
            - bash
            - -ec
            - |
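              # Derive the ordinal N from the StatefulSet hostname (crs-kafka-N)
              # and advertise the matching NodePort (30127 + N) on the node IP.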
              HOSTNAME=`hostname -s`
              if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
                ORD=${BASH_REMATCH[2]}
                PORT=$((ORD + 30127))
                export KAFKA_CFG_ADVERTISED_LISTENERS="PLAINTEXT://192.168.1.200:$PORT"
              else
                echo "Failed to get index from hostname $HOST"
                exit 1
              fi
              exec /entrypoint.sh /run.sh
          image: bitnami/kafka:3.5.1
          #        image: bitnami/kafka:latest
          imagePullPolicy: IfNotPresent
          securityContext:
            runAsUser: 0
#          resources:
#            requests:
#              memory: "1G"
#              cpu: "0.5"
          ports:
            - containerPort: 9092
          env:
            - name: KAFKA_CFG_ZOOKEEPER_CONNECT
              value: crs-zookeeper-0.zookeeper-cluster.elk.svc.cluster.local:2181,crs-zookeeper-1.zookeeper-cluster.elk.svc.cluster.local:2181,crs-zookeeper-2.zookeeper-cluster.elk.svc.cluster.local:2181
            #          value: zookeeper-cluster:2181
            - name: ALLOW_PLAINTEXT_LISTENER
              value: "yes"
          volumeMounts:
            - name: kafkadata-outer
              mountPath: /bitnami/kafka
  volumeClaimTemplates:
    - metadata:
        name: kafkadata-outer
      spec:
        storageClassName: nfs-storage
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: 2Ti
EOF
kubectl apply -f ~/kafka-yml/kafka.yml

Note: update the IP address in the export KAFKA_CFG_ADVERTISED_LISTENERS line of kafka.yml.

For access from outside, change it to the public IP 58.34.61.154 (the internal IP here is 192.168.1.200).
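
A quick smoke test of the external listeners; this sketch runs kafka-topics.sh from inside the Bitnami container and pre-creates the k8s-logs topic that Filebeat writes to below (partition/replica counts are illustrative):

kubectl exec -n elk crs-kafka-0 -- kafka-topics.sh \
  --bootstrap-server 192.168.1.200:30127 \
  --create --topic k8s-logs --partitions 3 --replication-factor 3

kubectl exec -n elk crs-kafka-0 -- kafka-topics.sh \
  --bootstrap-server 192.168.1.200:30127 --describe --topic k8s-logs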

kafka ui

docker pull provectuslabs/kafka-ui:latest

docker pull freakchicken/kafka-ui-lite
docker run -d \
--name kafka-ui1 \
--restart always \
--privileged=true \
-p 8888:8080 \
-e KAFKA_CLUSTERS_0_NAME=k8s-kafka \
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=192.168.1.200:30127,192.168.1.200:30128,192.168.1.200:30129 \
provectuslabs/kafka-ui:latest

URL: 192.168.1.200:8888

docker run -d \
--name kafka-ui2 \
--restart always \
--privileged=true \
-p 8889:8889 \
freakchicken/kafka-ui-lite

URL: 192.168.1.200:8889

4. filebeat

Method 1: Kubernetes (Helm)

helm search repo elastic/filebeat

cd && helm pull elastic/filebeat --untar --version 7.17.3

cd filebeat
cat > values-prod.yaml << 'EOF'
daemonset:
  filebeatConfig:
    filebeat.yml: |
      filebeat.inputs:
      - type: container
        paths:
          - /var/log/containers/*.log

      output.elasticsearch:
        enabled: false
        host: '${NODE_NAME}'
        hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
      output.kafka:
        enabled: true
        hosts: ["192.168.1.200:30127","192.168.1.200:30128","192.168.1.200:30129"]
        topic: k8s-logs
EOF
helm upgrade --install --namespace elk filebeat -f ./values-prod.yaml .
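
To confirm container logs are reaching Kafka, check the DaemonSet pods and tail the topic (the label below assumes the chart's default naming for a release called filebeat):

kubectl get pods -n elk -l app=filebeat-filebeat

kubectl exec -n elk crs-kafka-0 -- kafka-console-consumer.sh \
  --bootstrap-server 192.168.1.200:30127 \
  --topic k8s-logs --from-beginning --max-messages 5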

Method 2: Docker

cat > filebeat.yml << 'EOF'
# Log inputs (more than one can be configured)
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /mnt/nfs/logs/*/*.log
  tags: ["dev-c"]
  fields:
    server: dev-c
  fields_under_root: true
# Log output
output.kafka:
  enabled: true
  hosts: ["192.168.1.200:30127","192.168.1.200:30128","192.168.1.200:30129"]
  topic: "dev-c"
  partition.round_robin:
    reachable_only: false
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000
EOF
docker run -d --name filebeat \
--user=root \
--restart=always \
-v /mnt/nfs/logs/:/mnt/nfs/logs/ \
-v /root/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml \
-v /etc/localtime:/etc/localtime \
-v /etc/timezone:/etc/timezone \
elastic/filebeat:7.17.3
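
Sanity-check that the container started and picked up the config:

docker logs --tail 50 filebeat
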
5. logstash
helm search repo elastic/logstash

cd && helm pull elastic/logstash --untar --version 7.17.3

cd logstash
cat > values-prod.yaml << 'EOF'
logstashConfig:
  logstash.yml: |
    xpack.monitoring.enabled: false

logstashPipeline:
  logstash.conf: |
    input {
      kafka {
            bootstrap_servers => "192.168.1.200:30127,192.168.1.200:30128,192.168.1.200:30129"
            topics => ["k8s-logs"]
            #group_id => "mygroup"
            # If Kafka metadata is used (decorate_events), the byte-array deserializers below must stay disabled, or errors will occur
            #key_deserializer_class => "org.apache.kafka.common.serialization.ByteArrayDeserializer"
            #value_deserializer_class => "org.apache.kafka.common.serialization.ByteArrayDeserializer"
            consumer_threads => 1
            # Defaults to false; Kafka metadata is only attached when set to true
            decorate_events => true
            auto_offset_reset => "earliest"
         }
    }
    filter {
      mutate {
        # Take the Kafka record key and split it on commas
        split => ["[@metadata][kafka][key]", ","]
        add_field => {
            # Put the first element of the split key into a custom "index" field
            "index" => "%{[@metadata][kafka][key][0]}"
        }
      }
    }
    output { 
      elasticsearch {
          pool_max => 1000
          pool_max_per_route => 200
          hosts => ["elasticsearch-master-headless.elk.svc.cluster.local:9200"]
          index => "k8s-logs-%{+YYYY.MM.dd}"
      }
    }

# Resource limits
resources:
  requests:
    cpu: "100m"
    memory: "256Mi"
  limits:
    cpu: "1000m"
    memory: "1Gi"

persistence:
  enabled: true
  
volumeClaimTemplate:
  accessModes: ["ReadWriteOnce"]
  storageClassName: nfs-storage
  resources:
    requests:
      storage: 2Ti
EOF
helm upgrade --install --namespace elk logstash -f ./values-prod.yaml .
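
Once the Logstash pod is running, the pipeline should start creating daily k8s-logs-* indices; check through the Elasticsearch NodePort from step 1 (the pod label assumes the chart's default naming for a release called logstash):

kubectl get pods -n elk -l app=logstash-logstash
curl -s "192.168.1.200:31311/_cat/indices?v" | grep k8s-logs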

Hand-written YAML (alternative to the Helm chart)

mkdir -p ~/logstash-yml && cd ~/logstash-yml
cat > logstash.yaml << 'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-dev-configmap
  namespace: elk
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
  logstash.conf: |
    input {
      kafka {
        bootstrap_servers => "192.168.1.200:30127,192.168.1.200:30128,192.168.1.200:30129"
        topics => ["dev"]
        codec => "json"
        type => "dev"
        group_id => "dev"
        consumer_threads => 1
      }
    }
    filter {
        if [type] == "dev" {
            json {
                source => ["message"]
                remove_field => ["offset","host","beat","@version","event","agent","ecs"]
            }
            mutate {
                add_field => {
                project_path => "%{[log][file][path]}"
                }
            }
            mutate {
                split => ["project_path", "/"]
                add_field => {
                    "project_name" => "%{[project_path][-3]}"
                }
            }
            date {
                match => ["time","yyyy-MM-dd HH:mm:ss.SSS"]
                timezone => "Asia/Shanghai"
                target => "@timestamp"
            }
            mutate {
               remove_field => ["log","project_path","time","input"]
            }
        }
    }
    output {
        elasticsearch {
            hosts => ["elasticsearch-master-headless.elk.svc.cluster.local:9200"]
            index => "dev-%{+YYYY.MM.dd}"
        }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash-dev
  namespace: elk
spec:
  selector:
    matchLabels:
      app: logstash-dev
  replicas: 1
  template:
    metadata:
      labels:
        app: logstash-dev
    spec:
      containers:
      - name: logstash-dev
        image: docker.elastic.co/logstash/logstash:7.17.3
        ports:
        - containerPort: 5044
        volumeMounts:
          - name: logstash-pipeline-volume
            mountPath: /usr/share/logstash/pipeline
          - mountPath: /etc/localtime
            name: localtime
      volumes:
      - name: logstash-pipeline-volume
        configMap:
          name: logstash-dev-configmap
          items:
            - key: logstash.conf
              path: logstash.conf
      - hostPath:
          path: /etc/localtime
        name: localtime
---
kind: Service
apiVersion: v1
metadata:
  name: logstash-dev
  namespace: elk
spec:
  selector:
    app: logstash-dev
  type: ClusterIP
  ports:
  - protocol: TCP
    port: 5044
    targetPort: 5044
EOF
kubectl apply -f logstash.yaml
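
Verify the hand-written deployment the same way (the dev-* index only appears after something is produced to the dev topic):

kubectl get pods -n elk -l app=logstash-dev
curl -s "192.168.1.200:31311/_cat/indices?v" | grep dev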
