
Quick k8s Setup + Prometheus Deployment and Usage (Pure Hands-On Guide!!!)

Table of Contents

Environment Preparation

1. Install Docker on All Hosts

2. Deploy Harbor

3. Deploy k8s

Cluster Initialization

Install the Network Plugin (flannel here; a calico installation method follows later)

Node Scale-Out

4. Deploying the calico Network Plugin (remove flannel first if it is installed)

5. Deploy the LoadBalancer Service: MetalLB

6. Deploy helm

7. Deploy Prometheus

8. Log in to the Grafana Web UI

9. Import Dashboards

Access the Prometheus Main Program

10. Monitoring Usage Example


Environment Preparation

Host                  Role                 IP
k8s-master.exam.com   k8s cluster master   172.25.250.100
k8s-node1.exam.com    k8s cluster worker   172.25.250.10
k8s-node2.exam.com    k8s cluster worker   172.25.250.20
reg.exam.com          Harbor registry      172.25.250.250

# Set up local name resolution
[root@k8s-master ~]# vim /etc/hosts
172.25.250.100  k8s-master.exam.com
172.25.250.10   k8s-node1.exam.com
172.25.250.20	k8s-node2.exam.com
172.25.250.250	reg.exam.com

[root@k8s-master ~]# scp /etc/hosts root@172.25.250.10:/etc/hosts
[root@k8s-master ~]# scp /etc/hosts root@172.25.250.20:/etc/hosts
[root@k8s-master ~]# scp /etc/hosts root@172.25.250.250:/etc/hosts

1. Install Docker on All Hosts

For convenience, we install here from a local (offline) package bundle.

[root@k8s-master ~]# mkdir docker
[root@k8s-master ~]# cd docker/
[root@k8s-master docker]# 
[root@k8s-master docker]# ls
docker.tar.gz

# Extract and install
[root@k8s-master docker]# tar zxf docker.tar.gz 
[root@k8s-master docker]# ls
containerd.io-1.7.20-3.1.el9.x86_64.rpm       docker-ce-rootless-extras-27.1.2-1.el9.x86_64.rpm
docker-buildx-plugin-0.16.2-1.el9.x86_64.rpm  docker-compose-plugin-2.29.1-1.el9.x86_64.rpm
docker-ce-27.1.2-1.el9.x86_64.rpm             docker.tar.gz
docker-ce-cli-27.1.2-1.el9.x86_64.rpm
# Install all the rpm files
[root@k8s-master docker]# dnf install *.rpm -y

### Deploy Docker on the worker nodes and enable it at boot
[root@k8s-master docker]# scp -r /root/docker root@172.25.250.10:/root/docker
[root@k8s-master docker]# scp -r /root/docker root@172.25.250.20:/root/docker
[root@k8s-node1,2 ~]# cd docker/
[root@k8s-node1,2 docker]# dnf install *.rpm -y

[root@k8s-master docker]# scp -r /etc/docker/ root@172.25.250.10:/etc
[root@k8s-master docker]# scp -r /etc/docker/ root@172.25.250.20:/etc
[root@k8s-node1 ~]# systemctl enable --now docker
[root@k8s-node2 ~]# systemctl enable --now docker


### Enable at boot on all hosts
[root@k8s-master,node1,node2  docker]# systemctl enable --now docker

### Registry mirror (accelerator)
[root@k8s-master docker]# vim /etc/docker/daemon.json
{
  "registry-mirrors": ["https://reg.exam.com"]
}

# Restart
[root@k8s-master docker]# systemctl daemon-reload 
[root@k8s-master docker]# systemctl restart docker
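
To confirm the mirror is picked up, docker info can be checked (exact output varies by Docker version):
[root@k8s-master docker]# docker info | grep -A1 "Registry Mirrors"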

2. Deploy Harbor

Install Docker (extract the bundle and install the rpm files, the same as on the other hosts)
[root@harbor ~]# tar zxf docker.tar.gz 
[root@harbor ~]# systemctl enable --now docker

# Generate the TLS key and certificate
[root@harbor ~]# mkdir certs

[root@harbor ~]# openssl req -newkey rsa:4096 \
-nodes -sha256 -keyout certs/exam.com.key \
-addext "subjectAltName = DNS:reg.exam.com" \
-x509 -days 365 -out certs/exam.com.crt
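
Optionally, the generated certificate can be inspected to confirm the SAN (assuming OpenSSL 3.x as shipped with RHEL 9):
[root@harbor ~]# openssl x509 -in certs/exam.com.crt -noout -subject -ext subjectAltName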

# Create the certificate trust directory for the Docker client
[root@harbor ~]# mkdir /etc/docker/certs.d/reg.exam.com/ -p
[root@harbor ~]# cp /root/certs/exam.com.crt /etc/docker/certs.d/reg.exam.com/ca.crt

[root@harbor ~]# mkdir -p /data/certs
[root@harbor ~]# cp /root/certs/exam.com.key /data/certs/
[root@harbor ~]# cp /root/certs/exam.com.crt /data/certs/

[root@harbor ~]# ls /data/certs/
exam.com.crt  exam.com.key

[root@harbor ~]# systemctl restart docker

# Install Harbor
[root@harbor ~]# tar zxf harbor-offline-installer-v2.5.4.tgz 
[root@harbor ~]# cd harbor/
[root@harbor harbor]# ls
common.sh  harbor.v2.5.4.tar.gz  harbor.yml.tmpl  install.sh  LICENSE  prepare
[root@harbor harbor]# cp harbor.yml.tmpl harbor.yml

[root@harbor harbor]# vim harbor.yml
5    hostname: reg.exam.com                    # hostname used by the Harbor registry
17   certificate: /data/certs/exam.com.crt     # TLS certificate
18   private_key: /data/certs/exam.com.key
34   harbor_admin_password: redhat             # admin login password
 
# Start the deployment
[root@harbor ~]# cd harbor/
[root@harbor harbor]# ./install.sh --with-chartmuseum

# Stop / start (when needed)
[root@harbor harbor]# docker compose down 
[root@harbor harbor]# docker compose up -d

Add a local name-resolution entry on your Windows workstation, then open the Harbor host in a browser and log in to the registry.
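
The entry added to C:\Windows\System32\drivers\etc\hosts on the Windows machine looks like this:
172.25.250.250  reg.exam.com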

3. Deploy k8s

# Disable the swap partition on all hosts
[root@k8s-master node1 node2 ~]# swapon -s
[root@k8s-master node1 node2 ~]# swapoff -a
[root@k8s-master node1 node2 ~]# vim /etc/fstab 
 14 #/dev/mapper/rhel-swap   none                    swap    defaults        0 0
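
A quick way to confirm swap is fully off (the Swap line should read 0):
[root@k8s-master ~]# free -m | grep -i swap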

# Distribute the registry certificate so every node trusts reg.exam.com
[root@k8s-master ~]# mkdir -p /etc/docker/certs.d/reg.exam.com
[root@harbor ~]# scp /data/certs/exam.com.crt root@172.25.250.100:/etc/docker/certs.d/reg.exam.com/ca.crt
[root@harbor ~]# scp /data/certs/exam.com.crt root@172.25.250.10:/etc/docker/certs.d/reg.exam.com/ca.crt
[root@harbor ~]# scp /data/certs/exam.com.crt root@172.25.250.20:/etc/docker/certs.d/reg.exam.com/ca.crt

[root@k8s-node1 docker]# ls			# required on all nodes
certs.d  daemon.json
[root@k8s-node2 docker]# ls
certs.d  daemon.json

[root@k8s-node1 docker]# systemctl daemon-reload 
[root@k8s-node1 docker]# systemctl restart docker

# Log in to reg.exam.com
[root@k8s-master ~]# docker login reg.exam.com
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credential-stores

Login Succeeded

##################################################################################

Install the k8s deployment tools

# Install the bash completion tool
[root@k8s-master ~]# dnf install bash-completion -y   

# Add the k8s package repo (check network connectivity first)
[root@k8s-master ~]# vim /etc/yum.repos.d/k8s.repo
[k8s]
name=k8s
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm
gpgcheck=0

# Download the packages (download only, into /mnt)
[root@k8s-master ~]# dnf install kubelet-1.30.0-150500.1.1 kubeadm-1.30.0-150500.1.1 kubectl-1.30.0-150500.1.1 --downloadonly --downloaddir=/mnt -y

Change into /mnt
[root@k8s-master docker]# cd /mnt/
[root@k8s-master mnt]# ls
conntrack-tools-1.4.7-2.el9.x86_64.rpm  kubernetes-cni-1.4.0-150500.1.1.x86_64.rpm
cri-tools-1.30.1-150500.1.1.x86_64.rpm  libnetfilter_cthelper-1.0.0-22.el9.x86_64.rpm
hgfs                                    libnetfilter_cttimeout-1.0.0-19.el9.x86_64.rpm
kubeadm-1.30.0-150500.1.1.x86_64.rpm    libnetfilter_queue-1.0.5-1.el9.x86_64.rpm
kubectl-1.30.0-150500.1.1.x86_64.rpm    socat-1.7.4.1-5.el9.x86_64.rpm
kubelet-1.30.0-150500.1.1.x86_64.rpm

# Copy to the worker nodes
[root@k8s-master mnt]# scp *.rpm root@172.25.250.10:/mnt
[root@k8s-master mnt]# scp *.rpm root@172.25.250.20:/mnt


# Install (on the master and both workers)
[root@k8s-master node1 node2  mnt]# dnf install *.rpm -y

[root@k8s-master mnt]# ls
conntrack-tools-1.4.7-2.el9.x86_64.rpm         libnetfilter_cttimeout-1.0.0-19.el9.x86_64.rpm
hgfs                                           libnetfilter_queue-1.0.5-1.el9.x86_64.rpm
libnetfilter_cthelper-1.0.0-22.el9.x86_64.rpm  socat-1.7.4.1-5.el9.x86_64.rpm

Enable kubectl command completion

[root@k8s-master ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc 
[root@k8s-master ~]# source ~/.bashrc 

Install the cri-dockerd plugin (from local packages)

[root@k8s-master ~]# mkdir k8s
[root@k8s-master ~]# cd k8s/
[root@k8s-master k8s]# ls
cri-dockerd-0.3.14-3.el8.x86_64.rpm  libcgroup-0.41-19.el8.x86_64.rpm

[root@k8s-master k8s]# dnf install libcgroup-0.41-19.el8.x86_64.rpm cri-dockerd-0.3.14-3.el8.x86_64.rpm -y

[root@k8s-master k8s]# systemctl start cri-docker
[root@k8s-master k8s]# systemctl enable --now cri-docker
Created symlink /etc/systemd/system/multi-user.target.wants/cri-docker.service → /usr/lib/systemd/system/cri-docker.service.
[root@k8s-master k8s]# systemctl status cri-docker


# Install cri-dockerd on the worker nodes
[root@k8s-master k8s]# scp -r cri-dockerd-0.3.14-3.el8.x86_64.rpm libcgroup-0.41-19.el8.x86_64.rpm root@172.25.250.10:/root/k8s

[root@k8s-master k8s]# scp -r cri-dockerd-0.3.14-3.el8.x86_64.rpm libcgroup-0.41-19.el8.x86_64.rpm root@172.25.250.20:/root/k8s

[root@k8s-node1,2 ~]# cd k8s/
[root@k8s-node1,2 k8s]# ls
cri-dockerd-0.3.14-3.el8.x86_64.rpm  libcgroup-0.41-19.el8.x86_64.rpm
[root@k8s-node1,2 k8s]# dnf install *.rpm -y

Pull the images required by k8s on the master node

[root@k8s-master k8s]# kubeadm config images pull \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.30.0 \
--cri-socket=unix:///var/run/cri-dockerd.sock

[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.30.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.30.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.30.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.30.0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:v1.11.1
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.9
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.5.12-0

Specify the network plugin name and the base (pause) container image (do this now)

# Specify the network plugin name and the infra container image
[root@k8s-master k8s]# vim /lib/systemd/system/cri-docker.service 
...
10 ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --pod-infra-container-image=reg.exam.com/k8s/pause:3.9
...
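
After editing the unit file, systemd must reload it before the change takes effect (the service is restarted again right before cluster initialization below):
[root@k8s-master k8s]# systemctl daemon-reload
[root@k8s-master k8s]# systemctl restart cri-docker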

Push the images to the Harbor registry

# Retag every registry.aliyuncs.com/google_containers image as reg.exam.com/k8s/<name>:<tag>
[root@k8s-master k8s]# docker images | awk '/google/{ print $1":"$2}' \
| awk -F "/" '{system("docker tag "$0" reg.exam.com/k8s/"$3)}'

# Push every image whose repository path contains "k8s" (i.e. the newly tagged reg.exam.com/k8s/* images)
[root@k8s-master k8s]# docker images  | awk '/k8s/{system("docker push "$1":"$2)}'

Cluster Initialization

# Check and start the kubelet and cri-docker services
[root@k8s-master ~]# systemctl status kubelet.service 
[root@k8s-master ~]# systemctl start kubelet.service 
[root@k8s-master ~]# systemctl status cri-docker
[root@k8s-master ~]# systemctl restart cri-docker

# Run the init command (make sure the firewall and SELinux are disabled first)
[root@k8s-master ~]# kubeadm init --pod-network-cidr=10.244.0.0/16 \
--image-repository=reg.exam.com/k8s \
--kubernetes-version v1.30.0 \
--cri-socket=unix:///var/run/cri-dockerd.sock


# If initialization fails, reset and initialize again
# Be sure to keep the default pod network CIDR 10.244.0.0/16
kubeadm reset --cri-socket=unix:///var/run/cri-dockerd.sock

# Point KUBECONFIG at the cluster admin config
[root@k8s-master ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile

[root@k8s-master ~]# source ~/.bash_profile

# The node is not Ready yet: no network plugin is installed, so the CoreDNS pods are not running
[root@k8s-master ~]# kubectl get nodes
NAME                  STATUS     ROLES           AGE   VERSION
k8s-master.exam.com   NotReady   control-plane   61s   v1.30.0

[root@k8s-master ~]# kubectl get pods -A
NAMESPACE     NAME                                          READY   STATUS    RESTARTS   AGE
kube-system   coredns-846454f5f7-jsnwq                      0/1     Pending   0          54s
kube-system   coredns-846454f5f7-tpsmh                      0/1     Pending   0          54s
kube-system   etcd-k8s-master.exam.com                      1/1     Running   0          71s
kube-system   kube-apiserver-k8s-master.exam.com            1/1     Running   0          71s
kube-system   kube-controller-manager-k8s-master.exam.com   1/1     Running   0          71s
kube-system   kube-proxy-j2zjt                              1/1     Running   0          54s
kube-system   kube-scheduler-k8s-master.exam.com            1/1     Running   0          72s

Install the Network Plugin (flannel here; a calico installation method follows later)

I. Flannel

  1. Overview:

    • Flannel is an overlay network tool designed by the CoreOS team for Kubernetes, mainly used to give containers cross-host network connectivity.
    • It assigns a subnet to each node so that containers on different nodes can reach each other by IP address.
  2. Features:

    • Simple to use, with relatively little configuration.
    • Supports several backends, such as UDP and VXLAN.
    • Integrates well with Kubernetes and is one of the most commonly used network plugins.

II. Calico

  1. Overview:

    • Calico is a pure layer-3 data-center networking solution that provides secure network connectivity for containers, virtual machines and bare-metal servers.
    • It uses BGP (Border Gateway Protocol) to route packets between nodes and integrates well with existing network architectures.
  2. Features:

    • Good performance, especially in large clusters.
    • Rich network-policy features for fine-grained access control.
    • Supports several modes of operation, including IPIP and VXLAN.
Option A: download the flannel yaml deployment file from the upstream repository (either command below works)
[root@k8s-master ~]# wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

[root@k8s-master ~]# curl -L -o kube-flannel.yml https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

# Pull the images:
[root@k8s-master ~]# docker pull docker.io/flannel/flannel:v0.25.5

[root@k8s-master ~]# docker pull docker.io/flannel/flannel-cni-plugin:v1.5.1-flannel1


Option B: or use the local package bundle instead
[root@k8s-master ~]# mkdir flannel
[root@k8s-master ~]# cd flannel/
[root@k8s-master flannel]# ls
flannel-0.25.5.tag.gz  kube-flannel.yml

[root@k8s-master flannel]# docker load -i flannel-0.25.5.tag.gz 

## Create a new project in Harbor and push the images to it
[root@k8s-master flannel]# docker tag flannel/flannel:v0.25.5 \
reg.exam.com/flannel/flannel:v0.25.5
[root@k8s-master flannel]# docker push reg.exam.com/flannel/flannel:v0.25.5

[root@k8s-master flannel]# docker tag flannel/flannel-cni-plugin:v1.5.1-flannel1 \
reg.exam.com/flannel/flannel-cni-plugin:v1.5.1-flannel1
[root@k8s-master flannel]# docker push reg.exam.com/flannel/flannel-cni-plugin:v1.5.1-flannel1

# Install the flannel network plugin
[root@k8s-master flannel]# kubectl apply -f kube-flannel.yml
namespace/kube-flannel created
serviceaccount/flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
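
The flannel DaemonSet runs in the kube-flannel namespace created above; its pods can be checked with, for example:
[root@k8s-master flannel]# kubectl -n kube-flannel get pods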


[root@k8s-master ~]# kubectl -n kube-system get pods 
NAME                                          READY   STATUS    RESTARTS   AGE
coredns-846454f5f7-jsnwq                      1/1     Running   0          9m6s
coredns-846454f5f7-tpsmh                      1/1     Running   0          9m6s
etcd-k8s-master.exam.com                      1/1     Running   0          9m23s
kube-apiserver-k8s-master.exam.com            1/1     Running   0          9m23s
kube-controller-manager-k8s-master.exam.com   1/1     Running   0          9m23s
kube-proxy-j2zjt                              1/1     Running   0          9m6s
kube-scheduler-k8s-master.exam.com            1/1     Running   0          9m24s

Node Scale-Out

On all worker nodes:

1. Confirm the items below are in place

2. Disable swap

3. Install:

  • kubelet-1.30.0

  • kubeadm-1.30.0

  • kubectl-1.30.0

  • docker-ce

  • cri-dockerd

4. Edit the cri-dockerd unit file and add:

  • --network-plugin=cni

  • --pod-infra-container-image=reg.exam.com/k8s/pause:3.9

5. Start the services:

  • kubelet.service

  • cri-docker.service

Once all of the above is confirmed, the node can join the cluster.

Easily overlooked points!!!
1. kubelet-1.30.0, kubeadm-1.30.0 and kubectl-1.30.0 not installed

2. cri-dockerd unit file not edited to add --network-plugin=cni --pod-infra-container-image=reg.exam.com/k8s/pause:3.9


### Steps ###
[root@k8s-node1 ~]# vim /lib/systemd/system/cri-docker.service 

Append to line 10 (the ExecStart line): --network-plugin=cni --pod-infra-container-image=reg.exam.com/k8s/pause:3.9


3. Restart the services on every machine and check that they are running
[root@k8s-master node1 node2~]# systemctl restart cri-docker
[root@k8s-master node1 node2~]# systemctl restart kubelet.service

Start joining the nodes

## Note: if the join token generated earlier cannot be found at this point, it can be regenerated
[root@k8s-master mnt]# kubeadm token create --print-join-command

# Copy the join command generated on the master to each node and append the parameter
# --cri-socket=unix:///var/run/cri-dockerd.sock
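
A sketch of what the join command looks like on a worker (the token and hash are placeholders taken from the --print-join-command output above):
[root@k8s-node1 ~]# kubeadm join 172.25.250.100:6443 --token <token> \
--discovery-token-ca-cert-hash sha256:<hash> \
--cri-socket=unix:///var/run/cri-dockerd.sock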

[root@k8s-master ~]# kubectl get nodes 
NAME                  STATUS   ROLES           AGE   VERSION
k8s-master.exam.com   Ready    control-plane   33m   v1.30.0
k8s-node1.exam.com    Ready    <none>          37s   v1.30.0
k8s-node2.exam.com    Ready    <none>          25s   v1.30.0

Step: if every node shows Ready, the k8s deployment is complete!

4. Deploying the calico Network Plugin (remove flannel first if it is installed)

Delete the flannel configuration files on all nodes to avoid conflicts

# On the master
[root@k8s-master ~]# cd flannel/
[root@k8s-master flannel]# ls
flannel-0.25.5.tag.gz  kube-flannel.yml
[root@k8s-master flannel]# kubectl delete -f kube-flannel.yml 
namespace "kube-flannel" deleted
serviceaccount "flannel" deleted
clusterrole.rbac.authorization.k8s.io "flannel" deleted
clusterrolebinding.rbac.authorization.k8s.io "flannel" deleted
configmap "kube-flannel-cfg" deleted
daemonset.apps "kube-flannel-ds" deleted

[root@k8s-master flannel]# rm -rf /etc/cni/net.d/10-flannel.conflist

# On the worker nodes
[root@k8s-node1 ~]# rm -rf /etc/cni/net.d/10-flannel.conflist
[root@k8s-node2 ~]# rm -rf /etc/cni/net.d/10-flannel.conflist

Download the deployment file

[root@k8s-master ~]# mkdir calico
[root@k8s-master ~]# cd calico/
[root@k8s-master calico]# curl https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico-typha.yaml -o calico.yaml

[root@k8s-master calico]# ls
calico.yaml

Download the images and push them to the registry; here the local image archive is used directly

[root@k8s-master calico]# ls
calico-3.28.1.tar  calico.yaml

[root@k8s-master calico]# docker load -i calico-3.28.1.tar 
[root@k8s-master calico]# docker tag calico/cni:v3.28.1 reg.exam.com/calico/cni:v3.28.1
[root@k8s-master calico]# docker tag calico/node:v3.28.1 reg.exam.com/calico/node:v3.28.1
[root@k8s-master calico]# docker tag calico/kube-controllers:v3.28.1 reg.exam.com/calico/kube-controllers:v3.28.1
[root@k8s-master calico]# docker tag calico/typha:v3.28.1 reg.exam.com/calico/typha:v3.28.1

[root@k8s-master calico]# docker push reg.exam.com/calico/cni:v3.28.1
[root@k8s-master calico]# docker push reg.exam.com/calico/node:v3.28.1
[root@k8s-master calico]# docker push reg.exam.com/calico/kube-controllers:v3.28.1
[root@k8s-master calico]# docker push reg.exam.com/calico/typha:v3.28.1

Edit the yaml settings

[root@k8s-master calico]# vim calico.yaml
4835           image: calico/cni:v3.28.1
4906           image: calico/node:v3.28.1
4932           image: calico/node:v3.28.1

5158           image: calico/kube-controllers:v3.28.1
5247           - image: calico/typha:v3.28.1

# keep VXLAN off for the default pool; match the pod CIDR chosen at kubeadm init and the node interface
4973             - name: CALICO_IPV4POOL_VXLAN
4974               value: "Never"

4999             - name: CALICO_IPV4POOL_CIDR
5000               value: "10.244.0.0/16"
5001             - name: CALICO_AUTODETECTION_METHOD
5002               value: "interface=eth0"

[root@k8s-master calico]# kubectl apply -f calico.yaml 

# All pods Running
[root@k8s-master calico]# kubectl -n kube-system get pods 
NAME                                          READY   STATUS    RESTARTS   AGE
calico-kube-controllers-6849cb478c-vnhsn      1/1     Running   0          21s
calico-node-db29w                             1/1     Running   0          21s
calico-node-gkdxk                             1/1     Running   0          21s
calico-node-prxls                             1/1     Running   0          21s
calico-typha-fff9df85f-lvxkv                  1/1     Running   0          21s
coredns-846454f5f7-jsnwq                      1/1     Running   0          93m
coredns-846454f5f7-tpsmh                      1/1     Running   0          93m
etcd-k8s-master.exam.com                      1/1     Running   0          93m
kube-apiserver-k8s-master.exam.com            1/1     Running   0          93m
kube-controller-manager-k8s-master.exam.com   1/1     Running   0          93m
kube-proxy-gkgmq                              1/1     Running   0          60m
kube-proxy-hx2r4                              1/1     Running   0          60m
kube-proxy-j2zjt                              1/1     Running   0          93m
kube-scheduler-k8s-master.exam.com            1/1     Running   0          93m

# If the pods fail to start, try restarting the services
[root@k8s-master calico]# systemctl restart cri-docker
[root@k8s-master calico]# systemctl restart kubelet.service 

5. Deploy the LoadBalancer Service: MetalLB

On a cloud platform a VIP would be allocated for us automatically; on bare-metal hosts, MetalLB is needed to hand out these IPs.

MetalLB gives bare-metal Kubernetes clusters a practical implementation of the LoadBalancer service type, so external traffic can be load-balanced with high availability even without a cloud provider.

There are several main reasons to put a LoadBalancer in front of Prometheus:

I. High availability

  1. Keep the service continuously available:

    • Prometheus is critical when monitoring large systems. Without a load balancer, a Prometheus server failure can interrupt monitoring and delay understanding of the system state and troubleshooting.
    • With a load balancer, requests are spread across multiple Prometheus instances; when one instance fails, traffic is automatically forwarded to the healthy ones, improving the availability of the whole monitoring system.
  2. Avoid a single point of failure:

    • Without load balancing, Prometheus is usually a single deployment, so a failure of that server takes the whole monitoring system down.
    • With a load balancer, requests are distributed across several instances, so the remaining instances keep serving even if one fails.

II. Scalability

  1. Grow with the business:

    • As the monitored systems grow, Prometheus has to handle more metrics and requests. A load balancer makes it easy to add more Prometheus instances to share the load.
    • It can also adjust its distribution strategy based on the actual load, keeping every instance reasonably loaded and improving overall performance and scalability.
  2. Flexible resource allocation:

    • Requests can be routed to instances of different capacity: important monitoring jobs to higher-spec instances and less important ones to lower-spec instances, making better use of resources.

III. Load balancing and performance optimization

  1. Even out the request load:

    • Prometheus can come under heavy load while collecting and processing metrics. A load balancer spreads requests evenly so no single instance becomes a bottleneck.
    • Different algorithms (round robin, weighted round robin, least connections, etc.) can be chosen to balance the load appropriately and improve overall performance.
  2. Network optimization:

    • Sitting at a key point in the network, a load balancer can optimize and manage traffic, for example by caching frequently requested data to cut duplicate requests and transfers.
    • It can also optimize connections, for example by coalescing many small requests into larger ones, reducing network overhead and improving transfer efficiency.

[root@k8s-master ~]# mkdir metalLB
[root@k8s-master ~]# cd metalLB/

[root@k8s-master metalLB]# ls
configmap.yml  metallb-native.yaml  metalLB.tag.gz

[root@k8s-master metalLB]# docker load -i metalLB.tag.gz 
Loaded image: quay.io/metallb/controller:v0.14.8
Loaded image: quay.io/metallb/speaker:v0.14.8

# Tag and push
[root@k8s-master metalLB]# docker tag quay.io/metallb/speaker:v0.14.8 reg.exam.com/metallb/speaker:v0.14.8
[root@k8s-master metalLB]# docker tag quay.io/metallb/controller:v0.14.8 reg.exam.com/metallb/controller:v0.14.8

[root@k8s-master metalLB]# docker push reg.exam.com/metallb/speaker:v0.14.8 
[root@k8s-master metalLB]# docker push reg.exam.com/metallb/controller:v0.14.8 


## Deploy the service
# Switch kube-proxy to IPVS mode with strict ARP (required by MetalLB in layer-2 mode)
[root@k8s-master metalLB]# kubectl edit cm -n kube-system kube-proxy 
44     strictARP: true
59     mode: "ipvs"
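
The edited settings only apply to newly created kube-proxy pods; one optional way (a sketch) to roll them is:
[root@k8s-master metalLB]# kubectl -n kube-system rollout restart daemonset kube-proxy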

[root@k8s-master metalLB]# kubectl apply -f metallb-native.yaml

[root@k8s-master metalLB]# kubectl -n metallb-system get pods
NAME                          READY   STATUS    RESTARTS   AGE
controller-65957f77c8-z4fdf   1/1     Running   0          25s
speaker-28hcg                 1/1     Running   0          25s
speaker-6xxc4                 1/1     Running   0          25s
speaker-thsr2                 1/1     Running   0          25s

# Define the IP address pool
[root@k8s-master metalLB]# vim configmap.yml 
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
  - 172.25.250.50-172.25.250.99

---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: example
  namespace: metallb-system
spec:
  ipAddressPools:
  - first-pool
  
[root@k8s-master metalLB]# kubectl apply -f configmap.yml 
ipaddresspool.metallb.io/first-pool created
l2advertisement.metallb.io/example created

[root@k8s-master metalLB]# kubectl -n metallb-system get configmaps 
NAME                DATA   AGE
kube-root-ca.crt    1      6m31s
metallb-excludel2   1      6m31s
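
The address pool itself is stored as MetalLB custom resources rather than in a ConfigMap; assuming the CRDs from metallb-native.yaml are installed, they can be listed with:
[root@k8s-master metalLB]# kubectl -n metallb-system get ipaddresspools,l2advertisements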

6. Deploy helm

There are several important reasons to deploy Helm:

I. Convenient application management

  1. Simplified deployment:

    • Helm packages complex applications as reusable templates (charts). Instead of hand-managing a large number of Kubernetes resource files, an application can be deployed quickly from a chart. This greatly simplifies deployment, especially for applications made up of many microservices and complex configuration.
    • For example, an application consisting of a database, backend services and a frontend can be deployed from a single Helm chart instead of managing each component's deployment files separately.
  2. Version management:

    • Helm manages multiple versions of an application. Upgrades and rollbacks to a specific version are easy, which matters when managing application updates in production.
    • When a new release turns out to be faulty, it can be rolled back quickly to the previous stable version, reducing downtime caused by updates.

II. Repeatability and consistency

  1. Cross-environment deployment:

    • Helm ensures applications are deployed the same way in different environments (development, test, production), reducing problems caused by environment differences and improving stability and reliability.
    • For example, the same chart can be tested in development and then deployed to production, so the application behaves consistently across environments.
  2. Team collaboration:

    • Helm charts can be shared between team members so everyone deploys applications the same way, improving collaboration and avoiding problems caused by inconsistent deployment methods.
    • The team can maintain and update charts together, keeping the deployment process efficient and reliable.

III. Extensibility and flexibility

  1. Customization and extension:

    • Applications can be customized and extended as needed, by adjusting configuration parameters in a chart's values file or adding custom resource files.
    • For example, database parameters can be tuned per environment, or specific monitoring and log-collection components can be added.
  2. Community support:

    • Helm has an active community providing a large number of third-party charts, which help deploy common applications and services quickly and save development and deployment time.
    • The community is also a source of inspiration and best practices, and your own charts can be shared back to help other users.

Install helm

[root@k8s-master ~]# mkdir helm
[root@k8s-master ~]# cd helm/
[root@k8s-master helm]# ls
helm-push_0.10.4_linux_amd64.tar.gz  helm-v3.15.4-linux-amd64.tar.gz

[root@k8s-master helm]# tar zxf helm-v3.15.4-linux-amd64.tar.gz 
[root@k8s-master helm]# cd linux-amd64/
[root@k8s-master linux-amd64]# ls
helm  LICENSE  README.md
[root@k8s-master linux-amd64]# cp -p helm /usr/local/bin/

Configure helm command completion

[root@k8s-master linux-amd64]# echo "source <(helm completion bash)" >> ~/.bashrc
[root@k8s-master linux-amd64]# source ~/.bashrc 
[root@k8s-master linux-amd64]# helm version 
version.BuildInfo{Version:"v3.15.4", GitCommit:"fa9efb07d9d8debbb4306d72af76a383895aa8c4", GitTreeState:"clean", GoVersion:"go1.22.6"}

7. Deploy Prometheus

Download the container images referenced by the image paths in every chart's values.yaml and push them to the Harbor registry

[root@k8s-master ~]# mkdir prometheus
[root@k8s-master ~]# cd prometheus/

[root@k8s-master prometheus]# ls
grafana-11.2.0.tar                kube-state-metrics-2.13.0.tar  nginx-exporter-1.3.0-debian-12-r2.tar  prometheus-62.6.0.tar
kube-prometheus-stack-62.6.0.tgz  nginx-18.1.11.tgz              node-exporter-1.8.2.tar

-----------------------------------------------------------------------------------
[root@k8s-master prometheus]# tar zxf kube-prometheus-stack-62.6.0.tgz 
[root@k8s-master prometheus]# cd kube-prometheus-stack/
[root@k8s-master kube-prometheus-stack]# ls
Chart.lock  charts  Chart.yaml  CONTRIBUTING.md  README.md  templates  values.yaml

# Point the image registry at the local Harbor
[root@k8s-master kube-prometheus-stack]# vim values.yaml
 227   imageRegistry: "reg.exam.com"

----------------------------------------------------------------------------------
# Load the images
[root@k8s-master prometheus]# docker load -i prometheus-62.6.0.tar 
Loaded image: quay.io/prometheus/prometheus:v2.54.1
Loaded image: quay.io/thanos/thanos:v0.36.1
Loaded image: quay.io/prometheus/alertmanager:v0.27.0
Loaded image: quay.io/prometheus-operator/admission-webhook:v0.76.1
Loaded image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20221220-controller-v1.5.1-58-g787ea74b6
Loaded image: quay.io/prometheus-operator/prometheus-operator:v0.76.1
Loaded image: quay.io/prometheus-operator/prometheus-config-reloader:v0.76.1

# Tag and push the images
[root@k8s-master prometheus]# docker tag quay.io/prometheus/prometheus:v2.54.1 reg.exam.com/prometheus/prometheus:v2.54.1
[root@k8s-master prometheus]# docker push reg.exam.com/prometheus/prometheus:v2.54.1

[root@k8s-master prometheus]# docker tag quay.io/thanos/thanos:v0.36.1 reg.exam.com/thanos/thanos:v0.36.1
[root@k8s-master prometheus]# docker push reg.exam.com/thanos/thanos:v0.36.1

[root@k8s-master prometheus]# docker tag quay.io/prometheus/alertmanager:v0.27.0 reg.exam.com/prometheus/alertmanager:v0.27.0
[root@k8s-master prometheus]# docker push reg.exam.com/prometheus/alertmanager:v0.27.0

[root@k8s-master prometheus]# docker tag quay.io/prometheus-operator/admission-webhook:v0.76.1 reg.exam.com/prometheus-operator/admission-webhook:v0.76.1
[root@k8s-master prometheus]# docker push reg.exam.com/prometheus-operator/admission-webhook:v0.76.1

[root@k8s-master prometheus]# docker tag registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20221220-controller-v1.5.1-58-g787ea74b6 reg.exam.com/ingress-nginx/kube-webhook-certgen:v20221220-controller-v1.5.1-58-g787ea74b6
[root@k8s-master prometheus]# docker push reg.exam.com/ingress-nginx/kube-webhook-certgen:v20221220-controller-v1.5.1-58-g787ea74b6

[root@k8s-master prometheus]# docker tag quay.io/prometheus-operator/prometheus-operator:v0.76.1 reg.exam.com/prometheus-operator/prometheus-operator:v0.76.1
[root@k8s-master prometheus]# docker push reg.exam.com/prometheus-operator/prometheus-operator:v0.76.1

[root@k8s-master prometheus]# docker tag quay.io/prometheus-operator/prometheus-config-reloader:v0.76.1 reg.exam.com/prometheus-operator/prometheus-config-reloader:v0.76.1
[root@k8s-master prometheus]# docker push reg.exam.com/prometheus-operator/prometheus-config-reloader:v0.76.1

# Change the registry address in the grafana subchart
[root@k8s-master prometheus]# cd kube-prometheus-stack/charts/grafana/

[root@k8s-master grafana]# pwd
/root/prometheus/kube-prometheus-stack/charts/grafana

[root@k8s-master grafana]# vim values.yaml 
3   	imageRegistry: "reg.exam.com"
418     tag: "latest"

-----------------------------------------------------------------------------

# Load the grafana image archive
[root@k8s-master prometheus]# docker load -i grafana-11.2.0.tar 
Loaded image: grafana/grafana:11.2.0
Loaded image: quay.io/kiwigrid/k8s-sidecar:1.27.4
Loaded image: grafana/grafana-image-renderer:latest
Loaded image: bats/bats:v1.4.1

# Tag and push to the Harbor registry
[root@k8s-master prometheus]# docker tag grafana/grafana:11.2.0 reg.exam.com/grafana/grafana:11.2.0
[root@k8s-master prometheus]# docker push reg.exam.com/grafana/grafana:11.2.0

[root@k8s-master prometheus]# docker tag quay.io/kiwigrid/k8s-sidecar:1.27.4 reg.exam.com/kiwigrid/k8s-sidecar:1.27.4
[root@k8s-master prometheus]# docker push reg.exam.com/kiwigrid/k8s-sidecar:1.27.4

[root@k8s-master prometheus]# docker tag grafana/grafana-image-renderer:latest reg.exam.com/grafana/grafana-image-renderer:latest
[root@k8s-master prometheus]# docker push reg.exam.com/grafana/grafana-image-renderer:latest

[root@k8s-master prometheus]# docker tag bats/bats:v1.4.1 reg.exam.com/bats/bats:v1.4.1
[root@k8s-master prometheus]# docker push reg.exam.com/bats/bats:v1.4.1 

# Change the registry address in the kube-state-metrics subchart
[root@k8s-master kube-state-metrics]# pwd
/root/prometheus/kube-prometheus-stack/charts/kube-state-metrics
[root@k8s-master kube-state-metrics]# ls
Chart.yaml  README.md  templates  values.yaml
[root@k8s-master kube-state-metrics]# vim values.yaml 
  4   registry: reg.exam.com
 29   imageRegistry: "reg.exam.com"

-------------------------------------------------------------------------------------

# Load the images
[root@k8s-master prometheus]# docker load -i kube-state-metrics-2.13.0.tar 
Loaded image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.13.0
Loaded image: quay.io/brancz/kube-rbac-proxy:v0.18.0

# Tag and push
[root@k8s-master prometheus]# docker tag registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.13.0 reg.exam.com/kube-state-metrics/kube-state-metrics:v2.13.0
[root@k8s-master prometheus]# docker push reg.exam.com/kube-state-metrics/kube-state-metrics:v2.13.0

[root@k8s-master prometheus]# docker tag quay.io/brancz/kube-rbac-proxy:v0.18.0 reg.exam.com/brancz/kube-rbac-proxy:v0.18.0
[root@k8s-master prometheus]# docker push reg.exam.com/brancz/kube-rbac-proxy:v0.18.0

# Change the registry address in the prometheus-node-exporter subchart
[root@k8s-master prometheus-node-exporter]# pwd
/root/prometheus/kube-prometheus-stack/charts/prometheus-node-exporter
[root@k8s-master prometheus-node-exporter]# ls
Chart.yaml  ci  README.md  templates  values.yaml

[root@k8s-master prometheus-node-exporter]# vim values.yaml 
  5   registry: reg.exam.com
 36   imageRegistry: "reg.exam.com"
 
------------------------------------------------------------------------------

# Load the node-exporter images
[root@k8s-master prometheus]# docker load -i node-exporter-1.8.2.tar 
Loaded image: quay.io/prometheus/node-exporter:v1.8.2
Loaded image: quay.io/brancz/kube-rbac-proxy:v0.18.0	# already pushed above

# Tag and push
[root@k8s-master prometheus]# docker tag quay.io/prometheus/node-exporter:v1.8.2 reg.exam.com/prometheus/node-exporter:v1.8.2
[root@k8s-master prometheus]# docker push reg.exam.com/prometheus/node-exporter:v1.8.2

[root@k8s-master prometheus]# docker tag quay.io/brancz/kube-rbac-proxy:v0.18.0 reg.exam.com/brancz/kube-rbac-proxy:v0.18.0 
[root@k8s-master prometheus]# docker push reg.exam.com/brancz/kube-rbac-proxy:v0.18.0

=================================================================================

Create the namespace

[root@k8s-master prometheus]# kubectl create namespace kube-prometheus-stack
namespace/kube-prometheus-stack created

[root@k8s-master prometheus]# kubectl get namespaces 
NAME                    STATUS   AGE
default                 Active   154m
kube-node-lease         Active   154m
kube-prometheus-stack   Active   8s
kube-public             Active   154m
kube-system             Active   154m
metallb-system          Active   54m

Install Prometheus with helm. Note: do NOT press Ctrl+C while the installation is running!

[root@k8s-master prometheus]# cd kube-prometheus-stack/
[root@k8s-master kube-prometheus-stack]# 

# . refers to the current directory, /root/prometheus/kube-prometheus-stack
[root@k8s-master kube-prometheus-stack]# helm -n kube-prometheus-stack install kube-prometheus-stack .
NAME: kube-prometheus-stack
LAST DEPLOYED: Thu Sep 12 20:54:37 2024
NAMESPACE: kube-prometheus-stack
STATUS: deployed
REVISION: 1
NOTES:
kube-prometheus-stack has been installed. Check its status by running:
  kubectl --namespace kube-prometheus-stack get pods -l "release=kube-prometheus-stack"

Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.


Check that all pods are running
[root@k8s-master kube-prometheus-stack]# kubectl --namespace kube-prometheus-stack get pods
NAME                                                        READY   STATUS    RESTARTS   AGE
alertmanager-kube-prometheus-stack-alertmanager-0           2/2     Running   0          23m
kube-prometheus-stack-grafana-548c8fb6c4-29qdc              3/3     Running   0          23m
kube-prometheus-stack-kube-state-metrics-6688476957-n26gn   1/1     Running   0          23m
kube-prometheus-stack-operator-587f4b669b-8ztmk             1/1     Running   0          23m
kube-prometheus-stack-prometheus-node-exporter-j6j4t        1/1     Running   0          23m
kube-prometheus-stack-prometheus-node-exporter-pccpc        1/1     Running   0          23m
kube-prometheus-stack-prometheus-node-exporter-t77b8        1/1     Running   0          23m
prometheus-kube-prometheus-stack-prometheus-0               2/2     Running   0          23m

Check the services
[root@k8s-master kube-prometheus-stack]# kubectl -n kube-prometheus-stack get svc
NAME                                             TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
alertmanager-operated                            ClusterIP   None             <none>        9093/TCP,9094/TCP,9094/UDP   23m
kube-prometheus-stack-alertmanager               ClusterIP   10.104.96.90     <none>        9093/TCP,8080/TCP            23m
kube-prometheus-stack-grafana                    ClusterIP   10.103.122.224   <none>        80/TCP                       23m
kube-prometheus-stack-kube-state-metrics         ClusterIP   10.104.185.222   <none>        8080/TCP                     23m
kube-prometheus-stack-operator                   ClusterIP   10.98.25.116     <none>        443/TCP                      23m
kube-prometheus-stack-prometheus                 ClusterIP   10.102.144.68    <none>        9090/TCP,8080/TCP            23m
kube-prometheus-stack-prometheus-node-exporter   ClusterIP   10.98.117.125    <none>        9100/TCP                     23m
prometheus-operated                              ClusterIP   None             <none>        9090/TCP                     23m

Change how the service is exposed (to LoadBalancer)
[root@k8s-master kube-prometheus-stack]# kubectl -n kube-prometheus-stack edit svc kube-prometheus-stack-grafana 
39   type: LoadBalancer
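
Equivalently (a sketch of a non-interactive alternative), the service type can be changed with a patch:
[root@k8s-master kube-prometheus-stack]# kubectl -n kube-prometheus-stack patch svc kube-prometheus-stack-grafana -p '{"spec": {"type": "LoadBalancer"}}'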

What each svc does:
alertmanager-operated                            alert management
kube-prometheus-stack-grafana                    visualizes the metrics Prometheus collects
kube-prometheus-stack-prometheus-node-exporter   collects node-level metrics
kube-prometheus-stack-prometheus                 the Prometheus server itself

8. Log in to the Grafana Web UI

Retrieve the grafana credentials
[root@k8s-master helm]# kubectl -n kube-prometheus-stack get secrets kube-prometheus-stack-grafana -o yaml
apiVersion: v1
data:
  admin-password: cHJvbS1vcGVyYXRvcg==
  admin-user: YWRtaW4=
  ldap-toml: ""
kind: Secret
metadata:
  annotations:
    meta.helm.sh/release-name: kube-prometheus-stack
    meta.helm.sh/release-namespace: kube-prometheus-stack
  creationTimestamp: "2024-09-12T12:54:47Z"
  labels:
    app.kubernetes.io/instance: kube-prometheus-stack
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: grafana
    app.kubernetes.io/version: 11.2.0
    helm.sh/chart: grafana-8.5.1
  name: kube-prometheus-stack-grafana
  namespace: kube-prometheus-stack
  resourceVersion: "16943"
  uid: d19640ae-4b79-4013-ba03-e039fc98b493
type: Opaque

Decode the credentials
[root@k8s-master helm]# echo -n "cHJvbS1vcGVyYXRvcg==" | base64 -d
prom-operator		# password

[root@k8s-master helm]# echo "YWRtaW4=" | base64 -d
admin				# username
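
The same credentials can also be pulled in one step with jsonpath, for example:
[root@k8s-master helm]# kubectl -n kube-prometheus-stack get secret kube-prometheus-stack-grafana -o jsonpath='{.data.admin-password}' | base64 -d; echo
[root@k8s-master helm]# kubectl -n kube-prometheus-stack get secret kube-prometheus-stack-grafana -o jsonpath='{.data.admin-user}' | base64 -d; echo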

[root@k8s-master helm]# kubectl -n kube-prometheus-stack get svc
NAME                                             TYPE           CLUSTER-IP       EXTERNAL-IP     PORT(S)                      AGE
alertmanager-operated                            ClusterIP      None             <none>          9093/TCP,9094/TCP,9094/UDP   29m
kube-prometheus-stack-alertmanager               ClusterIP      10.104.96.90     <none>          9093/TCP,8080/TCP            29m
kube-prometheus-stack-grafana                    LoadBalancer   10.103.122.224   172.25.250.50   80:31471/TCP                 29m
kube-prometheus-stack-kube-state-metrics         ClusterIP      10.104.185.222   <none>          8080/TCP                     29m
kube-prometheus-stack-operator                   ClusterIP      10.98.25.116     <none>          443/TCP                      29m
kube-prometheus-stack-prometheus                 ClusterIP      10.102.144.68    <none>          9090/TCP,8080/TCP            29m
kube-prometheus-stack-prometheus-node-exporter   ClusterIP      10.98.117.125    <none>          9100/TCP                     29m
prometheus-operated                              ClusterIP      None             <none>          9090/TCP                     29m

# Open the assigned EXTERNAL-IP in a web browser

9. Import Dashboards

Official dashboard templates: Grafana dashboards | Grafana Labs

Access the Prometheus Main Program

[root@k8s-master helm]# kubectl -n kube-prometheus-stack edit svc kube-prometheus-stack-prometheus
48   type: LoadBalancer

[root@k8s-master helm]# kubectl -n kube-prometheus-stack get svc kube-prometheus-stack-prometheus
NAME                               TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)                         AGE
kube-prometheus-stack-prometheus   LoadBalancer   10.102.144.68   172.25.250.51   9090:30607/TCP,8080:32132/TCP   43m

Open 172.25.250.51:9090 in a browser

10. Monitoring Usage Example

Create a project to monitor

[root@k8s-master ~]# mkdir test
[root@k8s-master ~]# cd test/
[root@k8s-master test]# ls
nginx-18.1.11.tgz  nginx-exporter-1.3.0-debian-12-r2.tar

[root@k8s-master test]# tar zxf nginx-18.1.11.tgz 
[root@k8s-master test]# cd nginx/

Edit the chart to enable monitoring
[root@k8s-master nginx]# vim values.yaml 
 925 metrics:
 926   ## @param metrics.enabled Start a Prometheus exporter sidecar container
 927   ##
 928   enabled: true		# change to true
...
1015   serviceMonitor:
1016     ## @param metrics.serviceMonitor.enabled Creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)
1017     ##
1018     enabled: true			# change to true
1019     ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
1020     ##
1021     namespace: "kube-prometheus-stack"		# change the namespace
1022     ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
1023     ##
...
1046     labels: 
1047       release: kube-prometheus-stack		# add the label that the Prometheus release selects on

# Check the labels
[root@k8s-master nginx]# kubectl -n kube-prometheus-stack get servicemonitors.monitoring.coreos.com --show-labels 

Install the chart; the images must be pushed to the registry before installing

[root@k8s-master nginx]# ls
nginx-1.27.1-debian-12-r2.tar

# First image: nginx
[root@k8s-master nginx]# docker load -i nginx-1.27.1-debian-12-r2.tar
30f5b1069b7f: Loading layer [==================================================>]  190.1MB/190.1MB
Loaded image: bitnami/nginx:1.27.1-debian-12-r2

[root@k8s-master nginx]# docker tag bitnami/nginx:1.27.1-debian-12-r2 reg.exam.com/bitnami/nginx:1.27.1-debian-12-r2
[root@k8s-master nginx]# docker push reg.exam.com/bitnami/nginx:1.27.1-debian-12-r2

# Second image: nginx-exporter
[root@k8s-master nginx]# docker load -i nginx-exporter-1.3.0-debian-12-r2.tar 
016ff07f0ae3: Loading layer [==================================================>]  149.3MB/149.3MB
Loaded image: bitnami/nginx-exporter:1.3.0-debian-12-r2

[root@k8s-master nginx]# docker tag bitnami/nginx-exporter:1.3.0-debian-12-r2 reg.exam.com/bitnami/nginx-exporter:1.3.0-debian-12-r2
[root@k8s-master nginx]# docker push reg.exam.com/bitnami/nginx-exporter:1.3.0-debian-12-r2


# Install the chart
[root@k8s-master nginx]# helm install howe .
NAME: howe
LAST DEPLOYED: Thu Sep 12 21:52:15 2024
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
CHART NAME: nginx
CHART VERSION: 18.1.11
APP VERSION: 1.27.1

[root@k8s-master nginx]# kubectl get pods
NAME                          READY   STATUS    RESTARTS   AGE
howe-nginx-54c97cb888-x5hhh   2/2     Running   0          21s

[root@k8s-master nginx]# kubectl get svc
NAME         TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)                                     AGE
kubernetes   ClusterIP      10.96.0.1       <none>          443/TCP                                     4h56m
test-nginx   LoadBalancer   10.102.161.61   172.25.250.52   80:30614/TCP,443:31390/TCP,9113:32254/TCP   30s

[root@k8s-master nginx]# curl 172.25.250.52
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>


Load test:
[root@k8s-master nginx]# ab -c 5 -n 100 http://172.25.250.52/index.html
This is ApacheBench, Version 2.3 <$Revision: 1903618 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/

Benchmarking 172.25.250.52 (be patient).....done


Server Software:        nginx
Server Hostname:        172.25.250.52
Server Port:            80

Document Path:          /index.html
Document Length:        615 bytes

Concurrency Level:      5
Time taken for tests:   0.033 seconds
Complete requests:      100
Failed requests:        0
Total transferred:      87000 bytes
HTML transferred:       61500 bytes
Requests per second:    2991.15 [#/sec] (mean)
Time per request:       1.672 [ms] (mean)
Time per request:       0.334 [ms] (mean, across all concurrent requests)
Transfer rate:          2541.31 [Kbytes/sec] received

Connection Times (ms)
              min  mean[+/-sd] median   max
Connect:        0    1   0.1      0       1
Processing:     0    1   0.9      1       6
Waiting:        0    1   0.8      1       6
Total:          1    1   0.9      1       6
ERROR: The median and mean for the initial connection time are more than twice the standard
       deviation apart. These results are NOT reliable.

Percentage of the requests served within a certain time (ms)
  50%      1
  66%      1
  75%      1
  80%      2
  90%      2
  95%      3
  98%      6
  99%      6
 100%      6 (longest request)
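
After the load test, the exporter's scrape endpoint (port 9113 on the service above) can be checked directly; assuming the standard nginx exporter metric names, the counters should reflect the requests just generated:
[root@k8s-master nginx]# curl -s http://172.25.250.52:9113/metrics | grep -E '^nginx_(connections_active|http_requests_total)'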

Monitoring Adjustments

