当前位置: 首页 > article >正文

Kubernetes | Rocky Linux 8.9 安装部署 kubernetes集群

基于Rocky Linux 8.9 安装部署 kubernetes集群

1. 环境初始化(所有集群节点都需执行)

1.1 主机名与ip地址解析

#主机名与ip地址解析
cat >> /etc/hosts <<EOF
11.0.1.8 k8s-master01.dinginx.org k8s-master01 kubeapi.dinginx.org k8sapi.dinginx.org kubeapi
11.0.1.18 k8s-node01.dinginx.org k8s-node01
11.0.1.28 k8s-node02.dinginx.org k8s-node02
EOF

1.2 安装必要的软件包

#安装必要工具
yum update -y && yum install -y vim curl wget git yum-utils

1.3 禁用swap

sed -i '/swap/s/^/# /' /etc/fstab && swapoff -a

#验证
[root@k8s-master01 ~]# free -h
              total        used        free      shared  buff/cache   available
Mem:          1.7Gi       843Mi        74Mi       9.0Mi       816Mi       715Mi
Swap:            0B          0B          0B

1.4 时钟同步

# Set the system time zone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# Install the clock-sync tool — Rocky Linux uses yum/dnf, not apt
# (the original `apt install` would fail on this distro)
yum install -y ntpdate
# One-shot sync against Aliyun's public NTP server
ntpdate ntp.aliyun.com

1.5 关闭防火墙

systemctl stop firewalld.service 
systemctl disable firewalld.service 
systemctl status firewalld.service 

1.6 禁用 SELinux

sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

1.7 添加网桥过滤和内核转发配置文件

# Load the br_netfilter module first — without it the net.bridge.* sysctls
# below do not exist and `sysctl` would report "No such file or directory"
modprobe br_netfilter
# Persist the module load across reboots
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
# Write bridge-filtering and IP-forwarding settings to a dedicated drop-in
# (a `cat >>` append to /etc/sysctl.conf would duplicate lines on re-runs)
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply all sysctl configuration files now
sysctl --system

2 安装 Docker(所有节点都需执行)

2.1 安装docker及依赖

yum install -y dnf-plugins-core
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce

2.2 启动并配置docker镜像加速

# Configure Docker: registry mirrors plus the systemd cgroup driver.
# kubeadm/kubelet default to the systemd cgroup driver; if Docker stays on
# cgroupfs the kubelet fails to manage pods after `kubeadm init`.
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://dockerpull.org",
    "https://docker.888666222.xyz",
    "https://docker.211678.top",
    "https://docker.1panel.live",
    "https://hub.rat.dev",
    "https://docker.m.daocloud.io",
    "https://do.nark.eu.org",
    "https://dockerpull.com",
    "https://dockerproxy.cn",
    "https://docker.awsl9527.cn"
  ],
  "live-restore": true
}
EOF

# Reload unit files and restart Docker so the new daemon.json takes effect
systemctl daemon-reload && systemctl restart docker

# Start Docker automatically on boot
systemctl enable docker

2.3 安装 cri-dockerd

# Download the cri-dockerd RPM (CRI shim that lets kubelet talk to Docker)
curl -LO https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.10/cri-dockerd-0.3.10-3.el8.x86_64.rpm
# Install from the local file; -y keeps the run non-interactive and ./ makes
# yum treat the argument as a file path rather than a repo package name
yum install -y ./cri-dockerd-0.3.10-3.el8.x86_64.rpm
# Enable the service and start it immediately
systemctl enable --now cri-docker

修改 cri-docker 配置文件:

#修改 cri-docker 配置文件
sed -i.bak '/^ExecStart/c ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-cache-dir=/var/lib/cni/cache --cni-conf-dir=/etc/cni/net.d' /lib/systemd/system/cri-docker.service
#重新加载配置
systemctl daemon-reload && systemctl restart cri-docker.service

3 安装 Kubernetes 依赖包(所有节点都须执行)

3.1 安装 kubeadm、kubelet、kubectl

cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.29/rpm/repodata/repomd.xml.key
EOF

yum install -y --nogpgcheck kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet

3.2 修改 kubelet 配置文件

# Write the kubelet configuration. The original used `tee > file`, which
# gives tee no file argument and only works by accident of the shell
# redirect — tee takes the destination as an argument.
# NOTE(review): `containerRuntime` has been removed from KubeletConfiguration
# in recent Kubernetes releases — confirm against the v1.29 reference before
# relying on it; kubeadm init will regenerate this file anyway.
cat <<EOF | tee /var/lib/kubelet/config.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
containerRuntime: remote
containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock
EOF

设置为开机自启并现在立刻启动服务 --now:立刻启动服务

# 设置为开机自启并现在立刻启动服务 --now:立刻启动服务
sudo systemctl enable --now kubelet
# 查看状态
systemctl status kubelet

#会出现kubelet启动失败(忽略即可)
重新安装(或第一次安装)k8s集群,未经过kubeadm init 或者 kubeadm join后,kubelet会不断重启,这个是正常现象,执行init或join后问题会自动解决,对此官网有如下描述。

4 初始化 Kubernetes 主节点(控制平面执行)

4.1 使用 kubeadm 初始化 Kubernetes 控制平面节点:

kubeadm init --control-plane-endpoint="kubeapi.dinginx.org" \
  --kubernetes-version=v1.29.13 \
  --pod-network-cidr=10.244.0.0/16 \
  --service-cidr=10.96.0.0/12 \
  --token-ttl=0 \
  --cri-socket unix:///run/cri-dockerd.sock \
  --image-repository=registry.aliyuncs.com/google_containers
  
#修改镜像 tag
docker image tag registry.aliyuncs.com/google_containers/pause:3.9 registry.k8s.io/pause:3.9

#如果下载失败可选择手动下载
kubeadm config images pull --cri-socket unix:///run/cri-dockerd.sock --image-repository=registry.aliyuncs.com/google_containers

4.2 初始化成功后,会显示一个命令,用于让工作节点加入集群

......

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
#添加control 平面节点命令
  kubeadm join kubeapi.dinginx.org:6443 --token c1gr4y.rac06zzabjnu9abn \
	--discovery-token-ca-cert-hash sha256:79ae54d0bcb74bc877a6b4c7a93a3ac32d6db560de7ef2588371ca17ad3b377d \
	--control-plane 

Then you can join any number of worker nodes by running the following on each as root:

#添加worker节点命令
kubeadm join kubeapi.dinginx.org:6443 --token c1gr4y.rac06zzabjnu9abn \
	--discovery-token-ca-cert-hash sha256:79ae54d0bcb74bc877a6b4c7a93a3ac32d6db560de7ef2588371ca17ad3b377d 
kubeadm join <Master-Node-IP>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>

4.3 配置 kubectl

为了让当前用户能够管理 Kubernetes 集群,需要设置 kubectl 的配置:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

4.4 查看状态

#出现以下内容证明集群部署成功
[root@k8s-master01 ~]# kubectl config view --raw
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJVUZNZ3lQSG1rYTB3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TlRBeU1EUXdOVEk1TkRWYUZ3MHpOVEF5TURJd05UTTBORFZhTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUURJQnU5Mk50MWh6QUlwbkZ6WlZiRzRERS96U3Y3enM5Q0Jrc3FxU2NMMEkxanlGNGo5MUVpVkxYYW8KVVh5TXY0ZmRiKzUwbjRIVkNVZFg0R1JwTUE0Q3Z0elBnNERnMGcweHJESDRTRkM5SldQZUxxck1qZ3c0ODhJRgpQcUowM2dic1RpK04zSnFJWFNZMVFPZlc1bmEyWmJWdHY1VHE0V3BycVB2TVRHMTlPMTlXREl0aDNacCtxQWNyCnA0NjR2OUd2UWpRQy9Ub0gxMXlYQzJhVmxmNmpYc0p3R0FUSUtJenpjVGR0MFlNWS9NZkcrdTgvZ0UzNTNGdXMKdkp4MGdSMWtITlcxTzNpMW04ck5xWFJSY0ROeHF1aFUvWDhsdlFwK09SbDM0WjJLYXNsNGJFTlpubE11ZDVaMApZYkl4bi82M0J3bXk0NVhZM0QwMDRYRjY2YThYQWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJTcjJEZUFlQTh0Q3Zqbmc0Zmo3SThxOEJsSXN6QVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQmp6REdDRmowYwp4TkNhZmdDMTlLWlBSdThOT2pDOExzVEJzMHlOaEVZKzZLcU00bVo3alp4dnVVekUrUUE1SDVPK0hRUGpMUDhjCjdTZ2xiT1Z1Sm9CRXZJaEFDdGFCVElLMlk5MDdDc2NsUHlRaUx5VTZGbHU1Mk9uSEpxdkJ3Z1k3bGFRQXdaNEEKN1dwNTcwYThheFVsTWYwNkRtRUZLYjNBYXVhcGZ2VEJibHBkSHRtV2FBemdwUUt5TS9Vd0N4ZEhqdzJCeE5WaQpZZkJzQ1BmZmx6ek9maFVoMUNZNTY2Ukpxc0pCeW5KVWlmczBjQ2gzbnB2VlU0Smowa3p1ZHZnVXVhazZHdVh2Ck4wL3BidHpCd214N3RXZk00aFNxbmdvSURLVllwWDRmeEZ3M2U4cHdLTDVlK3pnOU9qMGtvVkNpUFV6eUY4bGYKb3k0TVg4NC8rUXBSCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    server: https://kubeapi.dinginx.org:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lJRHA1WklpVnFXY3N3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TlRBeU1EUXdOVEk1TkRWYUZ3MHlOakF5TURRd05UTTBORFphTUR3eApIekFkQmdOVkJBb1RGbXQxWW1WaFpHMDZZMngxYzNSbGNpMWhaRzFwYm5NeEdUQVhCZ05WQkFNVEVHdDFZbVZ5CmJtVjBaWE10WVdSdGFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEV2ZiM0EKbFZhUTczK1E5cTRlaUJjdmpXd2lYbXdFbi84cEJRNkRjblhFMGc2dHc1NjJhdThDQ0RKR0ZUWHJuVkMraU1jcQpOU0Rla0xBV1hOOHJqdzJUSzBRVDlTL21iMTlQb2JCTzdlenlZaVZsc0grTllSQjMxN083UXdMUm1XWW5Wb2NHCnJuYVJoZkxBemFMY1NMdHU0U3JwUWZ6TGlaa24vNzhoNk5NYkNzZGlHc2NQalNWazBYNlo3VzFyL0tONWZnaXMKeDhjd01EV3I1b1REL3UyZlBPN3czdk1rSktvKzN0QmoxdzFPVjRHUVlEaERBM0VZN3NIL1JkUjhZRWROZTZ0OQp0NDZLZVdOTjd0a0tpVThSMHBBSXRtdjJPVW53YW0wVUd3UmxhcWNDZlZ5ZTcvWlByYTE1MFNHZkdPK3o0RFVqCnNXNzRDak4xS0ZLVzQ2UHpBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkt2WU40QjREeTBLK09lRApoK1Bzanlyd0dVaXpNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUNLNXlmcTVNL2tsaUpIZkZXVnBSUno2UTZLCjVWMmc2ejY3b3FhcVJtY2NId01jSFMzUDNUeXcwdEkxK3pSa1Y5MVVIZ3JuWkJUdTBUZlI2US8wbjU4dGR0dzgKb2pQdml4Q3ltZVdzSlBHaE9nUTAwdGFCM0xMclEyUVlyZ0tla1hGYVIzbXg0eUdBN3FSUjR3Zlh4SVpIdUQyTgprWUpwaEowOUNxY3dkaFdUWS9sMHVoQ0tkT2k2NVRQL3lmSTNrSllTaWVBU0NXLzgxM1FlWlVneThHdCtURS9LClJTYUovQ04wbEczbE5CcDM1eGE3b1hsT0VqNUhyMm81UU93QlBlS0VsYkxyUXhQRmYyRFhDcGpRMHpjOVNFWE4KZlZMdGgrMlU2M2docElZSEVhanY1RGxMaTg0bnRyUHNlNjk0N1phVmVrZFpCS25aemNOQXNwMm9NLzFqCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBMW4yOXdKVldrTzkva1BhdUhvZ1hMNDFzSWw1c0JKLy9LUVVPZzNKMXhOSU9yY09lCnRtcnZBZ2d5UmhVMTY1MVF2b2pIS2pVZzNwQ3dGbHpmSzQ4Tmt5dEVFL1V2NW05ZlQ2R3dUdTNzOG1JbFpiQi8KaldFUWQ5ZXp1ME1DMFpsbUoxYUhCcTUya1lYeXdNMmkzRWk3YnVFcTZVSDh5NG1aSi8rL0llalRHd3JIWWhySApENDBsWk5GK21lMXRhL3lqZVg0SXJNZkhNREExcSthRXcvN3Ruenp1OE43ekpDU3FQdDdRWTljTlRsZUJrR0E0ClF3TnhHTzdCLzBYVWZHQkhUWHVyZmJlT2lubGpUZTdaQ29sUEVkS1FDTFpyOWpsSjhHcHRGQnNFWldxbkFuMWMKbnUvMlQ2MnRlZEVobnhqdnMrQTFJN0Z1K0FvemRTaFNsdU9qOHdJREFRQUJBb0lCQUFMbGZjdDgrTWRLUkttNwpld3gwZ0FPbllEOXlrUGpiQkVJVWpHYWdCQkVkV1E5bHBkNEd3YVdBWU5WcmNlZjFudUtEekZTNGxQSU1lQ1JGCktzcFd5ek1pa1cvSGp0Y0l4L3UyVGtBeW5GR0ZtOFdzeFh3dVJGK3A3UVpwV0Q5bVYrZk9GbEt6eG13ZXAvR00KbFZkdU1BSEdUcU1KdmhVdGpZRHB4alg2QStmSWppSGc4UlpqU1VzajVGeDJOMmFwb29CWnlLTFVJYStuU1dCQwpSTEdsL213ekZKTzJubS9JbVRPemlQbVF3TkVsTGpOOHRac2tTQmdOZkdWV0YySnV2RWI0QkhHa3BlRHVpZ2ppCmlDOVMvRkhydHVLSUFabjZFSllNQVlNTXIxekh3RXlobktjV1kyTEorR2Vjblh6MkVZTzZacmpqbXdrVmRuSGsKajJ5T1JZRUNnWUVBL05RUjdTUU5XZnZuR2ZUcjZKYTh5ZU9Mbm1uWTJRQ0pCYW1yZHdqMWt5TGt2YTEwdzl3YQpTZFdRa2dGRWNDbEgycUhxYmVkMTFKSWJOdWYxSWNJZi9jNEtManpsNjkxR2QvRlBsQXBsUXA5eGxTcG1SZk1iCmg1bEtNMFZrM2oya1NzaVorWmNyR3paTCtvWXlSSkV5R29ZUDR2REp2c3BqdjVWSGpaT3dSY0VDZ1lFQTJTNk8KTm1PNXZXYldTZTRGNWNXZVpHLzRmbFU1YVhtMmdwU3pzT2FFUlZBN0NvKzVabW9wNlNXeGpqRjU2bWgxODY3WQorMGxjcUVvenZTOTQvS0RDMUc1S203bHFnWWhYWUdSRXRaRURkdXh3WGdQeTkzSUFZV2R4V1FnMzZQeGFKdVFBCmhVQXA5ajBsdzltVXRkWXpvOFg5NVEvUEgxaWJUcjdtZHBhR1hyTUNnWUVBa1NvK3BMZEhoendRaENIUmRmbGUKcWxpaEtMTERsRTB2WDNYOUFscTlqZDdDRVhwUGljbU51b2tKK2hzS0xaQTJObjB6T2lsa3lVbWVvSmJzYzVhZgpnYjNVc0ptOHRnR1YzWFpMWjhmVVBqMklITXA4Y0h0V2VqUDBtNTJqZmN0TTByZDlXUWFIOUpiblp3KzRPeEJYCkFVMnFQM1piajlWckVteG02ZzQ3YnNFQ2dZQWczUitkOFZkQTV0ZlVWaFZFWUxRSUpMMW9lSUw0aC9kcktROUwKM0lYMWF4K1VDSTJZaHNZU3hxWm9pdW9VNDN1OXhSNkdYK0tLZ0xmMWo4SnlEKzJMY2FrT2lXOHBiTFUwRzlnZAoxWFM5d1NkN3dvTTRtR0tnV3FQWFFod2J3RnZ2K0o1dmkzY09FTkN2SXpTU2szZzY5eEhsWUpBS0FVRjNHQUlOCnZkZFZhd0tCZ0YvZCtYOEZtWTYydnF
1TTY3N0ZhdG0zRS9YSkpFVVFXM2NTSUFEeVFUbjQ1cUFJbHlwQW5kQVUKYmdFSHBIWkF2RmpwRlJlNk8vUWV1VWNFb1hveFVSbWlJNStjWGRzbGVBNlMvd1ZYajNpcGNaajJ2UHFlNE0yNApOUjdHYjRJeUNmTkM5d0loclVzS3hJamJFcHBOdS9rSXhibURYZGdDVS9zbzhlNVBZU2FOCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==

5 加入worker工作节点

#在每个工作节点上,运行之前记下的 `kubeadm join` 命令以将工作节点加入集群,k8s-node01、k8s-node02需执行
kubeadm join kubeapi.dinginx.org:6443 --token c1gr4y.rac06zzabjnu9abn \
--discovery-token-ca-cert-hash sha256:79ae54d0bcb74bc877a6b4c7a93a3ac32d6db560de7ef2588371ca17ad3b377d \
--cri-socket unix:///run/cri-dockerd.sock

#出现以下内容证明添加成功,添加worker01节点
[root@k8s-node01 ~]# kubeadm join kubeapi.dinginx.org:6443 --token c1gr4y.rac06zzabjnu9abn \
> --discovery-token-ca-cert-hash sha256:79ae54d0bcb74bc877a6b4c7a93a3ac32d6db560de7ef2588371ca17ad3b377d \
> --cri-socket unix:///run/cri-dockerd.sock
[preflight] Running pre-flight checks
	[WARNING FileExisting-tc]: tc not found in system path
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

#添加worker02节点
[root@k8s-node02 ~]# kubeadm join kubeapi.dinginx.org:6443 --token c1gr4y.rac06zzabjnu9abn \
> --discovery-token-ca-cert-hash sha256:79ae54d0bcb74bc877a6b4c7a93a3ac32d6db560de7ef2588371ca17ad3b377d \
> --cri-socket unix:///run/cri-dockerd.sock
[preflight] Running pre-flight checks
	[WARNING FileExisting-tc]: tc not found in system path
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

6 验证集群状态

#在控制节点上,检查集群和节点的状态:
[root@k8s-master01 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE   ERROR
etcd-0               Healthy   ok        
controller-manager   Healthy   ok        
scheduler            Healthy   ok

[root@k8s-master01 ~]# kubectl get nodes -owide
NAME                       STATUS   ROLES           AGE    VERSION    INTERNAL-IP   EXTERNAL-IP   OS-IMAGE                           KERNEL-VERSION                CONTAINER-RUNTIME
k8s-master01.dinginx.org   Ready    control-plane   108m   v1.29.13   11.0.1.8      <none>        Rocky Linux 8.9 (Green Obsidian)   4.18.0-513.5.1.el8_9.x86_64   docker://26.1.3
k8s-node01                 Ready    <none>          57m    v1.29.13   11.0.1.18     <none>        Rocky Linux 8.9 (Green Obsidian)   4.18.0-513.5.1.el8_9.x86_64   docker://26.1.3
k8s-node02                 Ready    <none>          48m    v1.29.13   11.0.1.28     <none>        Rocky Linux 8.9 (Green Obsidian)   4.18.0-513.5.1.el8_9.x86_64   docker://26.1.3

7 安装 Pod 网络插件

为了让 Pod 能够相互通信,需要安装一个网络插件。你可以选择 Calico、Weave 或 Flannel 等。这里以flannel为例:

[root@k8s-master01 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.26.1/Documentation/kube-flannel.yml

[root@k8s-master01 ~]# kubectl -n kube-flannel get pods -owide
NAME                    READY   STATUS    RESTARTS   AGE   IP          NODE                       NOMINATED NODE   READINESS GATES
kube-flannel-ds-8wxbg   1/1     Running   0          39m   11.0.1.28   k8s-node02                 <none>           <none>
kube-flannel-ds-z64wf   1/1     Running   0          39m   11.0.1.18   k8s-node01                 <none>           <none>
kube-flannel-ds-z7dgv   1/1     Running   0          39m   11.0.1.8    k8s-master01.dinginx.org   <none>           <none>

8 部署测试web01应用

8.1 创建web01的yaml文件

[root@k8s-master01 ~]# cat web01.yaml 
apiVersion: v1
kind: Namespace
metadata:
  name: web01
---
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    app: web01
  name: web01
  namespace: web01
spec:
  containers:
  - image: nginx
    name: web01
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: web01
  name: web01
  namespace: web01
spec:
  ports:
  - name: 80-80
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: web01
  type: NodePort

8.2 创建应用

[root@k8s-master01 ~]# kubectl apply -f  web01.yaml 
namespace/web01 created
pod/web01 created
service/web01 created

8.3 验证

[root@k8s-master01 ~]# kubectl get svc -n web01 
NAME    TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
web01   NodePort   10.105.181.238   <none>        80:32211/TCP   47s
[root@k8s-master01 ~]# curl 10.105.181.238
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

在这里插入图片描述


http://www.kler.cn/a/533401.html

相关文章:

  • 更换IP属地会影响网络连接速度吗
  • Hive on Spark优化
  • 25.02.04 《CLR via C#》 笔记14
  • 人类心智逆向工程:AGI的认知科学基础
  • 蓝桥杯刷题DAY3:Horner 法则 前缀和+差分数组 贪心
  • 【数据结构】(4) 线性表 List
  • 4.回归与聚类算法 4.1线性回归
  • 学前端框架之前,你需要先理解 MVC
  • 【llm对话系统】大模型 Llama 如何进行量化和推理
  • FPV光纤无人机军事战场技术详解
  • 图像分类与目标检测算法
  • 基于全志H616的智能家居
  • R语言速通
  • PyQt6/PySide6 的 QDialog 类
  • Spring Security(maven项目) 3.0.3.1版本 - 动态JDBC认证
  • https是如何保证安全的,又是如何保证不被中间人攻击的?
  • 防火墙的安全策略
  • VMware ThinApp 和VMware Workstation
  • MyBatis 调优指南:释放持久层性能潜力
  • 论计算机网络技术专业如何?创新
  • Aosp 15 编译遇到问题排查
  • Docker数据卷管理及优化
  • 计算机网络之数据链路层(数据链路层的功能)
  • 最新道路运输安全员管理人员考试真题
  • 通过docker安装部署deepseek以及python实现
  • java:mysql切换达梦数据库(五分钟适配完成)