Sealos-deployed K8s: master node suddenly NotReady after installing Docker
1. The cluster was running normally. After installing docker + harbor on master-1, the master-1 node went NotReady. The CNI plugin in use is Cilium.
#Install docker and harbor (docker itself runs fine)
root@master-1:/etc/apt# apt install docker-ce=5:19.03.15~3-0~ubuntu-focal docker-ce-cli=5:19.03.15~3-0~ubuntu-focal docker-compose
#Install harbor (harbor itself runs fine)
root@master-1:/usr/local/harbor# ./install.sh --with-trivy
#This is where the problem starts
root@master-1:~# crictl images
FATA[0000] validate service connection: validate CRI v1 image API for endpoint "unix:///var/run/image-cri-shim.sock": rpc error: code = Unimplemented desc = unknown service runtime.v1.ImageService
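To narrow down whether the failure sits in image-cri-shim or in containerd itself, crictl can be pointed directly at the containerd socket instead of the shim (a quick check, assuming the default socket path /run/containerd/containerd.sock):
#Bypass image-cri-shim and talk to containerd directly
root@master-1:~# crictl --runtime-endpoint unix:///run/containerd/containerd.sock \
    --image-endpoint unix:///run/containerd/containerd.sock images
#If this fails with the same error, the problem is inside containerd rather than the shim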
#Verify the socket exists
root@master-1:~# ll /var/run/image-cri-shim.sock
srw-rw---- 1 root root 0 Nov 8 13:26 /var/run/image-cri-shim.sock=
#Service status shows nothing obviously wrong
root@master-1:~# systemctl status containerd.service image-cri-shim.service
#The kubelet service, however, has failed to start
root@master-1:~# systemctl status kubelet.service
#The service log shows an obvious error (in essence it is a gRPC error, caused by kubelet being unable to find a CRI v1 runtime service)
root@master-1:~# journalctl -u kubelet.service
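The kubelet log is noisy; filtering for runtime-related lines makes the gRPC/CRI error easier to spot (a sketch, the exact error text varies with the kubelet version):
root@master-1:~# journalctl -u kubelet.service --no-pager | grep -iE 'cri|remote runtime|grpc' | tail -n 20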
Looking at the node list (from master-2), master-1 shows as failed
root@master-2:~# kubectl get nodes
NAME       STATUS     ROLES           AGE   VERSION
node-2     Ready      <none>          9d    v1.27.7
master-1   NotReady   control-plane   9d    v1.27.7
master-2   Ready      control-plane   9d    v1.27.7
master-3   Ready      control-plane   9d    v1.27.7
node-1     Ready      <none>          9d    v1.27.7
Take a look at the node's iptables rules
root@master-1:~# iptables -L -v -n
Compared them with master-2 (it is obvious that docker has rewritten all of these rules)
root@master-2:/etc/containerd# iptables -L -v -n
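What usually gives docker away here is the FORWARD chain policy and the extra DOCKER* chains it creates on startup; comparing just that slice on both nodes keeps the diff readable (a sketch of what to look for, the exact rule set depends on the docker version):
#docker flips the FORWARD policy to DROP and adds its own chains, which can drop forwarded pod traffic
root@master-1:~# iptables -S | grep -E '^-P FORWARD|DOCKER'
root@master-2:~# iptables -S | grep -E '^-P FORWARD|DOCKER'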
At this point I stopped docker and harbor
docker-compose down
systemctl disable --now docker
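One detail worth adding (an assumption about the standard docker-ce packaging on Ubuntu, verify on your own node): docker.service is socket-activated, so disabling only docker.service leaves docker.socket able to start it again on the next API call; disabling the socket unit as well avoids that.
systemctl disable --now docker.socket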
Check that the containerd sock file exists
root@master-2:~# ll /run/containerd/containerd.sock
srw-rw---- 1 root root 0 Nov 8 03:20 /run/containerd/containerd.sock=
Inspect the containerd config file (this is where the error turned up: compared with master-2 the file is different; it looks like it was overwritten during the docker install, the config has completely changed)
root@master-1:~# vim /etc/containerd/config.toml
root@master-2:~# vim /etc/containerd/config.toml
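The difference is almost certainly the stock config shipped by the containerd.io package that docker-ce depends on: it disables the CRI plugin, which is exactly why kubelet can no longer find a CRI v1 runtime. A quick way to confirm (a sketch; the line shown is the typical content of the docker-shipped default, not a capture from this node):
#On the broken node the effective config usually boils down to one line
root@master-1:~# grep -v '^#' /etc/containerd/config.toml | grep -v '^$'
disabled_plugins = ["cri"]
#On master-2 the CRI plugin section should still be present
root@master-2:~# grep -c 'plugins."io.containerd.grpc.v1.cri"' /etc/containerd/config.toml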
2. The fix
Copy the file over from master-2
#Copy the file
root@master-2:~# scp /etc/containerd/config.toml root@172.16.100.241:/etc/containerd/config.toml
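If no healthy node had been available to copy from, containerd can also regenerate a CRI-enabled default config on its own (an alternative, not what was done here; sealos-specific settings such as the sandbox/pause image would then need to be re-added manually):
root@master-1:~# containerd config default > /etc/containerd/config.toml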
#Restart the services
root@master-1:~# systemctl daemon-reload && systemctl restart containerd kubelet
#Both services are back to normal
root@master-1:~# systemctl status containerd kubelet
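It is also worth re-running the command that first exposed the problem, to confirm the image endpoint answers again:
#crictl can list images again once the CRI plugin is back
root@master-1:~# crictl images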
#Check the cluster status
root@master-1:~# kubectl get nodes
NAME       STATUS   ROLES           AGE   VERSION
node-2     Ready    <none>          10d   v1.27.7
master-1   Ready    control-plane   10d   v1.27.7
master-2   Ready    control-plane   10d   v1.27.7
master-3   Ready    control-plane   10d   v1.27.7
node-1     Ready    <none>          10d   v1.27.7
Take a look at etcd as well
#etcd's status and the cluster's leader election are both OK; self-check complete
root@master-1:~# kubectl exec -it etcd-master-1 -n kube-system \
> -- etcdctl --write-out=table --endpoints=https://172.16.100.241:2379 \
> --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt \
> --key=/etc/kubernetes/pki/etcd/server.key member list
root@master-1:~# kubectl exec -it etcd-master-1 -n kube-system \
> -- etcdctl --endpoints=https://172.16.100.241:2379 \
> --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt \
> --key=/etc/kubernetes/pki/etcd/server.key endpoint health
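member list and endpoint health do not show which member currently holds leadership; endpoint status does (a sketch using the same certificates and only this one endpoint, add the other members' URLs to see the whole cluster at once):
root@master-1:~# kubectl exec -it etcd-master-1 -n kube-system \
> -- etcdctl --write-out=table --endpoints=https://172.16.100.241:2379 \
> --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt \
> --key=/etc/kubernetes/pki/etcd/server.key endpoint status
#the IS LEADER column confirms the election state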