
8.28 - Review + Container-to-Host Communication + Cross-Host Container Communication

I. Review

1. Start docker

  systemctl start docker

2. Pull the registry image

  docker pull registry

3. Start the registry container, mounting a directory (to persist images) and mapping port 5000

  docker run -d -v /regist/:/var/lib/registry/ -p5000:5000 registry:latest

4. Edit /etc/docker/daemon.json

  
  vim /etc/docker/daemon.json
  ​
  # add the following to the existing JSON object (the leading comma separates it from the previous entry)
  .....
  ,
  "insecure-registries":[
     "http://192.168.2.30:5000"
  ]

5. Restart docker

  systemctl restart docker
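
A quick, hedged check after the restart: docker info lists the insecure registries the daemon actually loaded (the exact output layout varies slightly between Docker versions).

  # the configured address should appear under "Insecure Registries"
  docker info | grep -A 3 "Insecure Registries"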

6. Query the registry

  
  # list the repositories in the registry
  [root@docker ~]# curl localhost:5000/v2/_catalog
  {"repositories":["centos","centosnginx"]}

7. Push

  
  # tag the image
  [root@docker ~]# docker tag centos:nginx 192.168.2.30:5000/centosnginx:v0
  ​
  # push the tagged image
  [root@docker ~]# docker push 192.168.2.30:5000/centosnginx:v0
  The push refers to repository [192.168.2.30:5000/centosnginx]
  715d477d6a7d: Pushed 
  5700116c3db7: Pushed 
  82890c106451: Pushed 
  8c519ad003b5: Mounted from centos 
  acbca50d3a83: Mounted from centos 
  75cdf155cf76: Mounted from centos 
  07d4b334a739: Mounted from centos 
  74ddd0ec08fa: Mounted from centos 
  v0: digest: sha256:5fb4d1018f32c53f3b21f17771bfb6b9832e76b790426b9f4c067f4c75003deb size: 1997
  ​
  # list the local images
  [root@docker ~]# docker images
  REPOSITORY                      TAG       IMAGE ID       CREATED         SIZE
  192.168.2.30:5000/centosnginx   v0        4a14f7d33da9   18 hours ago    422MB
  centos                          nginx     4a14f7d33da9   18 hours ago    422MB
  centos                          httpd     ebbf109944f1   19 hours ago    338MB
  centos                          yum       69f3775bce0c   19 hours ago    272MB
  192.168.2.30:5000/centos        yum       ee9375ac855c   19 hours ago    272MB
  registry                        latest    cfb4d9904335   11 months ago   25.4MB
  centos                          latest    5d0da3dc9764   2 years ago     231MB

8. Pull

  
  # on a freshly built machine, try pulling an image from the private registry
  ​
  # upload the docker.sh script
  [root@haha ~]# rz -E
  rz waiting to receive.
  ​
  # run the docker install script
  [root@haha ~]# source docker.sh 
  ​
  # edit the configuration file
  [root@haha ~]# vim /etc/docker/daemon.json
  ​
  # point it at the private registry's IP and port
  {
   "insecure-registries":[
      "http://192.168.2.30:5000"
   ]
  }
  ​
  # restart the docker service
  [root@haha ~]# systemctl restart docker
  ​
  # stop the local firewall
  [root@haha ~]# systemctl stop firewalld
  ​
  # pull the image from the private registry
  [root@haha ~]# docker pull 192.168.2.30:5000/centosnginx:v0
  v0: Pulling from centosnginx
  a1d0c7532777: Pull complete 
  5a3d9ba04912: Pull complete 
  ca847d29e107: Pull complete 
  46aacd2dd646: Pull complete 
  30902bbca4a1: Pull complete 
  4859db75d191: Pull complete 
  ea0b1348be03: Pull complete 
  2796eebf9904: Pull complete 
  Digest: sha256:5fb4d1018f32c53f3b21f17771bfb6b9832e76b790426b9f4c067f4c75003deb
  Status: Downloaded newer image for 192.168.2.30:5000/centosnginx:v0
  192.168.2.30:5000/centosnginx:v0
  ​
  # list the pulled image
  [root@haha ~]# docker images
  REPOSITORY                      TAG       IMAGE ID       CREATED        SIZE
  192.168.2.30:5000/centosnginx   v0        4a14f7d33da9   18 hours ago   422MB
  ​
  # test the pulled image
  [root@haha ~]# docker run -d --name c8  192.168.2.30:5000/centosnginx:v0
  decd8137f46ea29eb4012d9cc4be2a993ca2f3e6a0deeb2dfc18c3ceffbcb829
  ​
  [root@haha ~]# docker ps --all
  CONTAINER ID   IMAGE                              COMMAND                   CREATED              STATUS              PORTS     NAMES
  decd8137f46e   192.168.2.30:5000/centosnginx:v0   "/bin/sh -c /usr/sbi…"   About a minute ago   Up About a minute   80/tcp    c8
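
The c8 container was started without a -p mapping, so its nginx only listens on the container's bridge IP. A hedged way to test it from the host (not part of the original run):

  # look up the container's IP on the default bridge, then request the page
  IP=$(docker inspect -f '{{.NetworkSettings.IPAddress}}' c8)
  curl "$IP"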

II. Communication between containers and the host

  
  # create a container
  [root@docker ~]# docker run -d  --name haha -p80:80/tcp centos:nginx 
  4f1af9bb44ed0894a7ace10ebd01cd5c136c9a10be7f5f63a1b54e482e8d2dad
  ​
  # list containers
  [root@docker ~]# docker ps -all
  CONTAINER ID   IMAGE          COMMAND                   CREATED       STATUS                    PORTS     NAMES
  4f1af9bb44ed   centos:nginx   "/bin/sh -c /usr/sbi…"   8 hours ago   Exited (0) 1 second ago             haha
  ​
  # install bridge-utils
  [root@docker ~]# yum -y install bridge-utils.x86_64
  ​
  # stop the docker service
  [root@docker ~]# systemctl stop docker
  Warning: Stopping docker.service, but it can still be activated by:
    docker.socket
  ​
  # show bridge information
  [root@docker ~]# brctl show
  bridge name bridge id      STP enabled interfaces
  docker0     8000.0242cb7fc4ce no 
  ​
  # start the docker service
  [root@docker ~]# systemctl start docker
  ​
  # start the container
  [root@docker ~]# docker start haha
  haha
  ​
  # show the bridge information again - the container's veth is now attached
  [root@docker ~]# brctl show
  bridge name bridge id      STP enabled interfaces
  docker0     8000.0242cb7fc4ce no    veth5c3d783
  ​
  # list the network drivers Docker provides
  [root@docker ~]# docker network ls
  NETWORK ID     NAME      DRIVER    SCOPE
  c1adfec7cffc   bridge    bridge    local
  d2749f541daa   host      host      local
  dc16f8943851   none      null      local
  # bridge - bridged mode (the default; containers get IPs on docker0)
  # host   - the container shares the host's network stack
  # none   - no networking; the container is isolated
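  # (aside, not part of the original run) the subnet behind the default bridge can be
  # confirmed with: docker network inspect bridge --format '{{(index .IPAM.Config 0).Subnet}}'
  # which should print 172.17.0.0/16 here, matching docker0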
  ​
  # create two containers in a row and their addresses will be consecutive
  # create the first container
  [root@docker ~]# docker run -it centos:latest /bin/bash
  ​
  # detach without stopping the container
  [root@f40505c2a977 /]# [root@docker ~]#
  ​
  # check the container's IP
  [root@docker ~]# docker inspect f4|grep IPA
              "SecondaryIPAddresses": null,
              "IPAddress": "172.17.0.2",
                      "IPAMConfig": null,
                      "IPAddress": "172.17.0.2",
                      
  # create a second container
  [root@docker ~]# docker run -it centos:latest /bin/bash
  ​
  # detach without stopping the container
  [root@0558cad162d7 /]# [root@docker ~]# 
  ​
  # check the container's IP
  [root@docker ~]# docker inspect 0558|grep IPA
              "SecondaryIPAddresses": null,
              "IPAddress": "172.17.0.3",
                      "IPAMConfig": null,
                      "IPAddress": "172.17.0.3",
                      
  # now test communication between the host and a container
  ​
  # contents of / as seen on the host
  [root@docker ~]# ls /
  bin   dev  home  lib64  mnt  proc    root  sbin    srv  tmp  var
  boot  etc  lib   media  opt  regist  run   source  sys  usr
  ​
  # create a container that shares the host's network (host mode)
  [root@docker ~]# docker run -it --network host centos:yum /bin/bash
  ​
  # ls inside the container - the filesystem is still the container's own; only the network namespace is shared
  [root@docker /]# ls
  bin  etc   lib   lost+found  mnt  proc  run   srv  tmp  var
  dev  home  lib64  media       opt  root  sbin  sys  usr
  ​
  # detach from the container without stopping it
  [root@docker /]# [root@docker ~]# 
  ​
  # inspect the container's IP address - it is empty because host mode uses the host's network
  [root@docker ~]# docker inspect 30e|grep IPA
              "SecondaryIPAddresses": null,
              "IPAddress": "",
                      "IPAMConfig": null,
                      "IPAddress": "",
  ​
  # install iproute inside the container
  [root@docker /]# yum -y install iproute
  ​
  # check the IPs - the container sees the host's interfaces
  [root@docker /]# ip a s
  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
      inet 127.0.0.1/8 scope host lo
         valid_lft forever preferred_lft forever
      inet6 ::1/128 scope host 
         valid_lft forever preferred_lft forever
  2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
      link/ether 00:0c:29:24:a3:0f brd ff:ff:ff:ff:ff:ff
      inet 192.168.2.30/24 brd 192.168.2.255 scope global noprefixroute ens33
         valid_lft forever preferred_lft forever
      inet6 fe80::f09d:7503:dea1:e7ab/64 scope link noprefixroute 
         valid_lft forever preferred_lft forever
  3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default 
      link/ether 02:42:cb:7f:c4:ce brd ff:ff:ff:ff:ff:ff
      inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
         valid_lft forever preferred_lft forever
      inet6 fe80::42:cbff:fe7f:c4ce/64 scope link 
         valid_lft forever preferred_lft forever
  11: veth6c87079@if10: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default 
      link/ether 6a:47:f4:af:fa:ca brd ff:ff:ff:ff:ff:ff link-netnsid 0
      inet6 fe80::6847:f4ff:feaf:faca/64 scope link 
         valid_lft forever preferred_lft forever
  13: vethc725a6f@if12: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default 
      link/ether 9a:d6:09:6b:fb:70 brd ff:ff:ff:ff:ff:ff link-netnsid 1
      inet6 fe80::98d6:9ff:fe6b:fb70/64 scope link 
         valid_lft forever preferred_lft forever
         
  # install httpd inside the container
  [root@docker /]# yum -y install httpd
  ​
  # create an httpd test page inside the container
  [root@docker /]# echo "haha" > /var/www/html/index.html
  ​
  # start the httpd service (fails: there is no systemd inside the container)
  [root@docker /]# systemctl start httpd
  System has not been booted with systemd as init system (PID 1). Can't operate.
  Failed to connect to bus: Host is down
  ​
  # start httpd with this command instead
  [root@docker /]# httpd -k start
  AH00558: httpd: Could not reliably determine the server's fully qualified domain name, using fe80::f09d:7503:dea1:e7ab. Set the 'ServerName' directive globally to suppress this message
  ​
  # access the test page from inside the container
  [root@docker /]# curl localhost
  haha
  ​
  # detach from the container without stopping it
  [root@docker /]# [root@docker ~]# 
  ​
  # access the test page (created in the container) from the host
  [root@docker ~]# curl 192.168.2.30
  haha
  ​
  # stop the firewall so other machines can reach the page
  [root@docker ~]# systemctl stop firewalld

Open a browser and visit the httpd test page created in the container:

192.168.2.30
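
The page should also be reachable from any other machine on the 192.168.2.0/24 network once the firewall on the docker host is stopped; a quick check from a second host:

  # expected to return the "haha" test page
  curl http://192.168.2.30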

III. Cross-host container communication

Hostname | IP           | Role         | Installed software
node1    | 192.168.2.10 | control node | etcd, flannel, docker
node2    | 192.168.2.11 | managed node | flannel, docker

Control node (node1)

1. Install the software

  
  # install the etcd database
  [root@node1 ~]# yum -y install etcd
  ​
  # install flannel
  [root@node1 ~]# yum -y install flannel

2. Edit the etcd configuration

  
  # configure and start etcd
  # edit the configuration file
  [root@node1 ~]# vim /etc/etcd/etcd.conf 
  6 ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001"
  21 ETCD_ADVERTISE_CLIENT_URLS="http://192.168.2.10:2379,http://192.168.2.10:4001"

3. Start etcd

  [root@node1 ~]# systemctl restart etcd

4. Check the ports

  # check whether the listening ports are up
  ​
  [root@node1 ~]# netstat -lnput|grep 2379
  tcp6       0      0 :::2379                 :::*                    LISTEN      1608/etcd           
  [root@node1 ~]# netstat -lnput|grep 4001
  tcp6       0      0 :::4001                 :::*                    LISTEN      1608/etcd    

5. Enable etcd at boot

  
  # enable start on boot
  ​
  [root@node1 ~]# systemctl enable etcd.service 
  Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
  ​

6. Test basic etcd operations

  [root@node1 ~]# etcdctl set testdir/testkey0 1000
  1000
  [root@node1 ~]# etcdctl get testdir/testkey0
  1000
  [root@node1 ~]# etcdctl set b 123
  123
  [root@node1 ~]# etcdctl get b
  123
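
The test keys only exist to confirm that etcd accepts reads and writes; they can be removed afterwards (a hedged cleanup using the same etcdctl v2 syntax):

  # delete the test keys
  etcdctl rm testdir/testkey0
  etcdctl rm b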

7. Check cluster health

  # check the cluster health
  ​
  [root@node1 ~]# etcdctl -C http://192.168.2.10:4001 cluster-health
  member 8e9e05c52164694d is healthy: got healthy result from http://192.168.2.10:2379
  cluster is healthy
  [root@node1 ~]# etcdctl -C http://192.168.2.10:2379 cluster-health
  member 8e9e05c52164694d is healthy: got healthy result from http://192.168.2.10:2379
  cluster is healthy

8. Edit the flannel configuration

  [root@node1 ~]# vim /etc/sysconfig/flanneld 
  4 FLANNEL_ETCD_ENDPOINTS="http://192.168.2.10:2379"
  [root@node1 ~]# cat /etc/sysconfig/flanneld 
  # Flanneld configuration options  
  ​
  # etcd url location.  Point this to the server where etcd runs
  FLANNEL_ETCD_ENDPOINTS="http://192.168.2.10:2379"
  ​
  # etcd config key.  This is the configuration key that flannel queries
  # For address range assignment
  FLANNEL_ETCD_PREFIX="/atomic.io/network" # etcd prefix where flannel stores its data
  ​
  # Any additional options that you want to pass
  #FLANNEL_OPTIONS=""

9. Store the overlay network range in etcd

  
  [root@node1 ~]# etcdctl mk /atomic.io/network/config '{ "Network" : "172.20.0.0/16" }' 
  { "Network" : "172.20.0.0/16" }
  ​
  [root@node1 ~]# etcdctl get /atomic.io/network/config
  { "Network" : "172.20.0.0/16" }

10. Check the flannel0 IP address (flanneld must already be running: systemctl start flanneld)

  [root@node1 ~]# ip a s
  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
      inet 127.0.0.1/8 scope host lo
         valid_lft forever preferred_lft forever
      inet6 ::1/128 scope host 
         valid_lft forever preferred_lft forever
  2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
      link/ether 00:0c:29:a5:08:12 brd ff:ff:ff:ff:ff:ff
      inet 192.168.2.10/24 brd 192.168.2.255 scope global noprefixroute ens33
         valid_lft forever preferred_lft forever
      inet6 fe80::7bef:aa24:508b:e09/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
      inet6 fe80::9d6d:f728:f4f1:6c5b/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
      inet6 fe80::f09d:7503:dea1:e7ab/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
  3: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
      link/none 
      inet 172.20.64.0/16 scope global flannel0
         valid_lft forever preferred_lft forever
      inet6 fe80::3ac2:e60:225:e090/64 scope link flags 800 
         valid_lft forever preferred_lft forever
  ​

11. Install docker

  # run the docker install script
  ​
  [root@node1 ~]# source docker.sh

12. Check the IPs before the docker service is started

  
  # before starting the docker service
  ​
  # check the IPs
  [root@node1 ~]# ip a s
  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
      inet 127.0.0.1/8 scope host lo
         valid_lft forever preferred_lft forever
      inet6 ::1/128 scope host 
         valid_lft forever preferred_lft forever
  2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
      link/ether 00:0c:29:a5:08:12 brd ff:ff:ff:ff:ff:ff
      inet 192.168.2.10/24 brd 192.168.2.255 scope global noprefixroute ens33
         valid_lft forever preferred_lft forever
      inet6 fe80::7bef:aa24:508b:e09/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
      inet6 fe80::9d6d:f728:f4f1:6c5b/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
      inet6 fe80::f09d:7503:dea1:e7ab/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
  3: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
      link/none 
      inet 172.20.64.0/16 scope global flannel0
         valid_lft forever preferred_lft forever
      inet6 fe80::3ac2:e60:225:e090/64 scope link flags 800 
         valid_lft forever preferred_lft forever

13. After starting the docker service, check the IPs

  
  # start the docker service
  [root@node1 ~]# systemctl start docker
  ​
  # check the IPs - docker0 now exists
  [root@node1 ~]# ip a s
  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
      inet 127.0.0.1/8 scope host lo
         valid_lft forever preferred_lft forever
      inet6 ::1/128 scope host 
         valid_lft forever preferred_lft forever
  2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
      link/ether 00:0c:29:a5:08:12 brd ff:ff:ff:ff:ff:ff
      inet 192.168.2.10/24 brd 192.168.2.255 scope global noprefixroute ens33
         valid_lft forever preferred_lft forever
      inet6 fe80::7bef:aa24:508b:e09/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
      inet6 fe80::9d6d:f728:f4f1:6c5b/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
      inet6 fe80::f09d:7503:dea1:e7ab/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
  3: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
      link/none 
      inet 172.20.64.0/16 scope global flannel0
         valid_lft forever preferred_lft forever
      inet6 fe80::3ac2:e60:225:e090/64 scope link flags 800 
         valid_lft forever preferred_lft forever
  4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
      link/ether 02:42:35:91:9e:11 brd ff:ff:ff:ff:ff:ff
      inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
         valid_lft forever preferred_lft forever
         
  [root@node1 ~]# cat /run/flannel/subnet.env 
  FLANNEL_NETWORK=172.20.0.0/16
  FLANNEL_SUBNET=172.20.64.1/24
  FLANNEL_MTU=1472
  FLANNEL_IPMASQ=false

14. Copy daemon.json from a host where docker is already configured

  
  # copy daemon.json from another host
  ​
  [root@node1 ~]# scp root@192.168.2.30:/etc/docker/daemon.json /etc/docker
  root@192.168.2.30's password: 
  daemon.json                                         100%  402   247.1KB/s   00:00    
  [root@node1 ~]# vim /etc/docker/daemon.json 
  {
      "registry-mirrors": [
          "https://do.nark.eu.org",
          "https://dc.j8.work",
          "https://docker.m.daocloud.io",
          "https://dockerproxy.com",
          "https://docker.mirrors.ustc.edu.cn",
          "https://docker.nju.edu.cn"
      ],
          "hosts":  [
                    "tcp://0.0.0.0:2375",
                    "unix:///var/run/docker.sock"
          ],
          "insecure-registries":[
                  "http://192.168.2.30:5000"
  ​
          ]
  }

15. Docker cannot restart: edit the docker.service file (the "hosts" key in daemon.json conflicts with the -H option on ExecStart)

  
  [root@node1 ~]# vim /usr/lib/systemd/system/docker.service
  ​
  # before
  ​
  ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
  ​
  # after
  ​
  ExecStart=/usr/bin/dockerd 
  ​
  # the unit files must be reloaded before restarting
  [root@node1 ~]# systemctl daemon-reload
  [root@node1 ~]# systemctl restart docker.service 

16. Check the flannel subnet

  [root@node1 ~]# cat /run/flannel/subnet.env 
  FLANNEL_NETWORK=172.20.0.0/16
  FLANNEL_SUBNET=172.20.64.1/24
  FLANNEL_MTU=1472
  FLANNEL_IPMASQ=false
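
Instead of copying these values into daemon.json by hand, they can be read straight from subnet.env; a minimal sketch, assuming the file layout shown above:

  # load FLANNEL_SUBNET and FLANNEL_MTU into the shell
  source /run/flannel/subnet.env
  # these are the values to put into "bip" and "mtu" in /etc/docker/daemon.json
  echo "bip = ${FLANNEL_SUBNET}"
  echo "mtu = ${FLANNEL_MTU}"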

17. Put docker0 on the same subnet as flannel0

  
  # add the bridge IP (bip) and lower the MTU from the default 1500 to 1472 to match flannel0
  ​
  [root@node1 ~]# vim /etc/docker/daemon.json 
  ​
  {
      "registry-mirrors": [
          "https://do.nark.eu.org",
          "https://dc.j8.work",
          "https://docker.m.daocloud.io",
          "https://dockerproxy.com",
          "https://docker.mirrors.ustc.edu.cn",
          "https://docker.nju.edu.cn"
      ],
          "hosts":  [
                    "tcp://0.0.0.0:2375",
                    "unix:///var/run/docker.sock"
          ],
          "insecure-registries":[
                  "http://192.168.2.30:5000"
  ​
          ],
          "bip" : "172.20.64.1/24",
          "mtu" : 1472
  }
  ​
  # restart the docker service
  [root@node1 ~]# systemctl restart docker.service 

18. Verify

  # verification succeeded:
  # docker0 and flannel0 are now on the same subnet (172.20.64.0)
  [root@node1 ~]# ip a s
  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
      inet 127.0.0.1/8 scope host lo
         valid_lft forever preferred_lft forever
      inet6 ::1/128 scope host 
         valid_lft forever preferred_lft forever
  2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
      link/ether 00:0c:29:a5:08:12 brd ff:ff:ff:ff:ff:ff
      inet 192.168.2.10/24 brd 192.168.2.255 scope global noprefixroute ens33
         valid_lft forever preferred_lft forever
      inet6 fe80::7bef:aa24:508b:e09/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
      inet6 fe80::9d6d:f728:f4f1:6c5b/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
      inet6 fe80::f09d:7503:dea1:e7ab/64 scope link tentative dadfailed 
         valid_lft forever preferred_lft forever
  3: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
      link/none 
      inet 172.20.64.0/16 scope global flannel0
         valid_lft forever preferred_lft forever
      inet6 fe80::3ac2:e60:225:e090/64 scope link flags 800 
         valid_lft forever preferred_lft forever
  4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1472 qdisc noqueue state DOWN group default 
      link/ether 02:42:35:91:9e:11 brd ff:ff:ff:ff:ff:ff
      inet 172.20.64.1/24 brd 172.20.64.255 scope global docker0
         valid_lft forever preferred_lft forever
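
The routing table tells the same story: traffic for the local docker0 subnet stays on the bridge, while the rest of the flannel /16 leaves through flannel0. A hedged check:

  # expect a 172.20.0.0/16 route via flannel0 and a 172.20.64.0/24 route via docker0
  ip route | grep 172.20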

19. Pull an image and check the container's IP address

  
  # pull the centos image
  [root@node1 ~]# docker pull centos
  Using default tag: latest
  latest: Pulling from library/centos
  a1d0c7532777: Pull complete 
  Digest: sha256:a27fd8080b517143cbbbab9dfb7c8571c40d67d534bbdee55bd6c473f432b177
  Status: Downloaded newer image for centos:latest
  docker.io/library/centos:latest
  # create a container
  [root@node1 ~]# docker run -it centos:latest /bin/bash
  ​
  # detach without stopping the container
  [root@72ed0ee15de4 /]# [root@node1 ~]# 
  ​
  # check the container's IP address
  [root@node1 ~]# docker inspect 72e | grep IPA
              "SecondaryIPAddresses": null,
              "IPAddress": "172.20.64.2",
                      "IPAMConfig": null,
                      "IPAddress": "172.20.64.2",

Managed node (node2)

node2

  
  # install flannel
  [root@node2 ~]# yum -y install flannel
  ​
  # edit the flannel configuration
  # point it at the etcd database on node1
  [root@node2 ~]# vim /etc/sysconfig/flanneld 
  4  FLANNEL_ETCD_ENDPOINTS="http://192.168.2.10:2379"
  ​
  # check the modified flannel configuration
  [root@node2 ~]# cat /etc/sysconfig/flanneld 
  # Flanneld configuration options  
  ​
  # etcd url location.  Point this to the server where etcd runs
  FLANNEL_ETCD_ENDPOINTS="http://192.168.2.10:2379"
  ​
  # etcd config key.  This is the configuration key that flannel queries
  # For address range assignment
  FLANNEL_ETCD_PREFIX="/atomic.io/network"
  ​
  # Any additional options that you want to pass
  #FLANNEL_OPTIONS=""
  ​
  # start the flannel service
  [root@node2 ~]# systemctl start flanneld.service 
  Job for flanneld.service failed because a timeout was exceeded. See "systemctl status flanneld.service" and "journalctl -xe" for details.
  ​
  # if startup fails, stop the firewall (and set SELinux to permissive) on node1
  [root@node1 ~]# systemctl stop firewalld
  [root@node1 ~]# setenforce 0
  ​
  # the flannel service now starts successfully
  [root@node2 ~]# systemctl start flanneld.service 
  ​
  # check the flannel0 IP address (172.20.45.0)
  [root@node2 ~]# ip a s
  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
      inet 127.0.0.1/8 scope host lo
         valid_lft forever preferred_lft forever
      inet6 ::1/128 scope host 
         valid_lft forever preferred_lft forever
  2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
      link/ether 00:0c:29:ef:db:fa brd ff:ff:ff:ff:ff:ff
      inet 192.168.2.11/24 brd 192.168.2.255 scope global noprefixroute ens33
         valid_lft forever preferred_lft forever
      inet6 fe80::7bef:aa24:508b:e09/64 scope link noprefixroute 
         valid_lft forever preferred_lft forever
      inet6 fe80::9d6d:f728:f4f1:6c5b/64 scope link tentative noprefixroute dadfailed 
         valid_lft forever preferred_lft forever
      inet6 fe80::f09d:7503:dea1:e7ab/64 scope link tentative noprefixroute dadfailed 
         valid_lft forever preferred_lft forever
  3: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
      link/none 
      inet 172.20.45.0/16 scope global flannel0
         valid_lft forever preferred_lft forever
      inet6 fe80::4dd7:31b5:677f:fd33/64 scope link flags 800 
         valid_lft forever preferred_lft forever
  ​

Put docker0 on the same subnet as flannel0

  [root@node2 ~]# cat /run/flannel/subnet.env
  FLANNEL_NETWORK=172.20.0.0/16
  FLANNEL_SUBNET=172.20.45.1/24
  FLANNEL_MTU=1472
  FLANNEL_IPMASQ=false
  [root@node2 ~]# vim /etc/docker/daemon.json
  {
      "registry-mirrors": [
          "https://do.nark.eu.org",
          "https://dc.j8.work",
          "https://docker.m.daocloud.io",
          "https://dockerproxy.com",
          "https://docker.mirrors.ustc.edu.cn",
          "https://docker.nju.edu.cn"
      ],
          "hosts":  [
                    "tcp://0.0.0.0:2375",
                    "unix:///var/run/docker.sock"
          ],
          "insecure-registries":[
                  "http://192.168.2.30:5000"
  ​
          ],
          "bip" : "172.20.45.1/24",
          "mtu" : 1472
  }
  ​
  # after changing daemon.json the docker service must be restarted
  # the restart fails
  [root@node2 ~]# systemctl restart docker
  Job for docker.service failed because the control process exited with error code. See "systemctl status docker.service" and "journalctl -xe" for details.
  ​
  # edit the docker.service unit file
  [root@node2 ~]# vim /usr/lib/systemd/system/docker.service
  ​
  # remove everything after dockerd
  13 ExecStart=/usr/bin/dockerd
  ​
  # still fails to start
  [root@node2 ~]# systemctl restart docker
  Warning: docker.service changed on disk. Run 'systemctl daemon-reload' to reload units.
  Job for docker.service failed because start of the service was attempted too often. See "systemctl status docker.service" and "journalctl -xe" for details.
  To force a start use "systemctl reset-failed docker.service" followed by "systemctl start docker.service" again.
  ​
  # reload the unit files
  [root@node2 ~]# systemctl daemon-reload
  ​
  # the docker service now starts successfully
  [root@node2 ~]# systemctl restart docker
  ​
  # check flannel0 and docker0 - they are now on the same subnet (172.20.45.0)
  [root@node2 ~]# ip a s
  1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
      link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
      inet 127.0.0.1/8 scope host lo
         valid_lft forever preferred_lft forever
      inet6 ::1/128 scope host 
         valid_lft forever preferred_lft forever
  2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
      link/ether 00:0c:29:ef:db:fa brd ff:ff:ff:ff:ff:ff
      inet 192.168.2.11/24 brd 192.168.2.255 scope global noprefixroute ens33
         valid_lft forever preferred_lft forever
      inet6 fe80::7bef:aa24:508b:e09/64 scope link noprefixroute 
         valid_lft forever preferred_lft forever
      inet6 fe80::9d6d:f728:f4f1:6c5b/64 scope link tentative noprefixroute dadfailed 
         valid_lft forever preferred_lft forever
      inet6 fe80::f09d:7503:dea1:e7ab/64 scope link tentative noprefixroute dadfailed 
         valid_lft forever preferred_lft forever
  3: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
      link/none 
      inet 172.20.45.0/16 scope global flannel0
         valid_lft forever preferred_lft forever
      inet6 fe80::4dd7:31b5:677f:fd33/64 scope link flags 800 
         valid_lft forever preferred_lft forever
  4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1472 qdisc noqueue state DOWN group default 
      link/ether 02:42:0b:a4:c1:55 brd ff:ff:ff:ff:ff:ff
      inet 172.20.45.1/24 brd 172.20.45.255 scope global docker0
         valid_lft forever preferred_lft forever

Pull an image and test the IP addresses

  # pull the centos image
  [root@node2 ~]# docker pull centos
  Using default tag: latest
  latest: Pulling from library/centos
  a1d0c7532777: Pull complete 
  Digest: sha256:a27fd8080b517143cbbbab9dfb7c8571c40d67d534bbdee55bd6c473f432b177
  Status: Downloaded newer image for centos:latest
  docker.io/library/centos:latest
  ​
  # create a container
  [root@node2 ~]# docker run -it centos:latest /bin/bash
  ​
  # detach without stopping the container
  [root@d2d8a0756299 /]# [root@node2 ~]# 
  ​
  # check the container's IP - it is also on flannel0's subnet
  [root@node2 ~]# docker inspect d2 | grep IPA
              "SecondaryIPAddresses": null,
              "IPAddress": "172.20.45.2",
                      "IPAMConfig": null,
                      "IPAddress": "172.20.45.2",
  ​
  # attach to the container and ping the node1 container's address: 172.20.64.2
  [root@node2 ~]# docker attach d2
  [root@d2d8a0756299 /]# ping 172.20.64.2
  PING 172.20.64.2 (172.20.64.2) 56(84) bytes of data.
  64 bytes from 172.20.64.2: icmp_seq=1 ttl=60 time=2.51 ms
  64 bytes from 172.20.64.2: icmp_seq=2 ttl=60 time=0.759 ms
  ^C
  --- 172.20.64.2 ping statistics ---
  2 packets transmitted, 2 received, 0% packet loss, time 1002ms
  rtt min/avg/max/mdev = 0.759/1.632/2.506/0.874 ms
  ​
  # on node1, pull an image, create a container, and check its IP
  [root@node1 ~]# docker run -it centos:latest /bin/bash
  [root@72ed0ee15de4 /]# [root@node1 ~]# 
  [root@node1 ~]# docker inspect 72e | grep IPA
              "SecondaryIPAddresses": null,
              "IPAddress": "172.20.64.2",
                      "IPAMConfig": null,
                      "IPAddress": "172.20.64.2",
                      
  # on node1, attach to the container and ping the node2 container's address: 172.20.45.2
  [root@node1 ~]# docker attach 72e
  [root@72ed0ee15de4 /]# ping 172.20.45.2
  PING 172.20.45.2 (172.20.45.2) 56(84) bytes of data.
  64 bytes from 172.20.45.2: icmp_seq=1 ttl=60 time=0.437 ms
  64 bytes from 172.20.45.2: icmp_seq=2 ttl=60 time=0.845 ms
  ^C
  --- 172.20.45.2 ping statistics ---
  2 packets transmitted, 2 received, 0% packet loss, time 1001ms
  rtt min/avg/max/mdev = 0.437/0.641/0.845/0.204 ms
  ​

Summary of the node2 steps:

1. Install flannel

  yum -y install flannel

2. Point flannel at the etcd database

  [root@node2 ~]# cat /etc/sysconfig/flanneld 
  # Flanneld configuration options  
  ​
  # etcd url location.  Point this to the server where etcd runs
  FLANNEL_ETCD_ENDPOINTS="http://192.168.2.10:2379"
  ​
  # etcd config key.  This is the configuration key that flannel queries
  # For address range assignment
  FLANNEL_ETCD_PREFIX="/atomic.io/network"
  ​
  # Any additional options that you want to pass
  #FLANNEL_OPTIONS=""

3. Start flannel

  systemctl start flanneld

4. Check the subnet flannel assigned

  [root@node2 ~]# cat /run/flannel/subnet.env
  FLANNEL_NETWORK=172.20.0.0/16
  FLANNEL_SUBNET=172.20.45.1/24
  FLANNEL_MTU=1472
  FLANNEL_IPMASQ=false

5. Install docker

  # run the docker.sh script

6. Write the flannel-assigned subnet into daemon.json

  [root@node2 ~]# vim /etc/docker/daemon.json
  {
      "registry-mirrors": [
          "https://do.nark.eu.org",
          "https://dc.j8.work",
          "https://docker.m.daocloud.io",
          "https://dockerproxy.com",
          "https://docker.mirrors.ustc.edu.cn",
          "https://docker.nju.edu.cn"
      ],
          "hosts":  [
                    "tcp://0.0.0.0:2375",
                    "unix:///var/run/docker.sock"
          ],
          "insecure-registries":[
                  "http://192.168.2.30:5000"
  ​
          ],
          "bip" : "172.20.45.1/24",
          "mtu" : 1472
  }
  ​

7. Restart docker; if it fails to restart, remove the -H options from docker.service (they conflict with the "hosts" key in daemon.json)

  [root@node2 ~]# systemctl restart docker
  Job for docker.service failed because the control process exited with error code. See "systemctl status docker.service" and "journalctl -xe" for details.
  [root@node2 ~]# vim /usr/lib/systemd/system/docker.service
  # remove everything after dockerd
  13 ExecStart=/usr/bin/dockerd
  ​
  [root@node2 ~]# systemctl restart docker
  Warning: docker.service changed on disk. Run 'systemctl daemon-reload' to reload units.
  Job for docker.service failed because start of the service was attempted too often. See "systemctl status docker.service" and "journalctl -xe" for details.
  To force a start use "systemctl reset-failed docker.service" followed by "systemctl start docker.service" again.
  [root@node2 ~]# systemctl daemon-reload

8. Start docker

  [root@node2 ~]# systemctl restart docker

9. Pull a centos image

  [root@node2 ~]# docker pull centos
  Using default tag: latest
  latest: Pulling from library/centos
  a1d0c7532777: Pull complete 
  Digest: sha256:a27fd8080b517143cbbbab9dfb7c8571c40d67d534bbdee55bd6c473f432b177
  Status: Downloaded newer image for centos:latest
  docker.io/library/centos:latest

10. Ping the IP address of the container on node1

  [root@node2 ~]# docker run -it centos:latest /bin/bash
  [root@d2d8a0756299 /]# [root@node2 ~]# 
  [root@node2 ~]# docker inspect d2 | grep IPA
              "SecondaryIPAddresses": null,
              "IPAddress": "172.20.45.2",
                      "IPAMConfig": null,
                      "IPAddress": "172.20.45.2",
  ​
  [root@node2 ~]# docker attach d2
  [root@d2d8a0756299 /]# ping 172.20.64.2
  PING 172.20.64.2 (172.20.64.2) 56(84) bytes of data.
  64 bytes from 172.20.64.2: icmp_seq=1 ttl=60 time=2.51 ms
  64 bytes from 172.20.64.2: icmp_seq=2 ttl=60 time=0.759 ms
  ^C
  --- 172.20.64.2 ping statistics ---
  2 packets transmitted, 2 received, 0% packet loss, time 1002ms
  rtt min/avg/max/mdev = 0.759/1.632/2.506/0.874 ms

IV. Summary: how it works

1. flannel assigns a subnet to each docker host.

2. The subnet and IP information is stored in the etcd database.

3. When flannel starts, it reads {"Network":"172.20.0.0/16"} from etcd and picks a random subnet from that range for the current host's flannel0 interface, e.g. 172.20.64.0.

4. Docker's daemon.json is configured so that docker0 uses the same subnet as flannel0; containers created by docker then receive IPs inside the flannel-managed range.
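
Condensed into commands, the flow on a node looks roughly like this (a sketch assembled from the steps above, not a copy of any single run):

  # 1. store the overlay network range in etcd (done once, on the etcd host)
  etcdctl mk /atomic.io/network/config '{ "Network" : "172.20.0.0/16" }'
  # 2. start flanneld; it leases a per-host subnet and writes it to /run/flannel/subnet.env
  systemctl start flanneld
  cat /run/flannel/subnet.env
  # 3. put that subnet and MTU into "bip" and "mtu" in /etc/docker/daemon.json, then restart docker
  systemctl restart docker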

