Docker Basics - 05 Networking
1. Docker's Four Network Modes
OVS: OpenVSwitch; SDN overlay network: tunnel encapsulation, overlay (stacked) networking.
[root@cl-server ~]# docker network ls
NETWORK ID     NAME      DRIVER    SCOPE
bd382bf32ccd   bridge    bridge    local
c2c5ba17c4bf   host      host      local
50aba51ecc64   none      null      local
[root@cl-server ~]# yum -y install bridge-utils
[root@component ~]# brctl show
bridge name     bridge id               STP enabled     interfaces
docker0         8000.024265273925       no
[root@component ~]# docker network inspect bridge
[
    {
        "Name": "bridge",
        "Id": "619e35de43822b03e2ba439188e43487a065ea0a15eeb1cc324a8abaaa09e9a7",
        "Created": "2021-11-04T21:50:25.088109165+08:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.17.0.0/16",
                    "Gateway": "172.17.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {},
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",
            "com.docker.network.bridge.enable_ip_masquerade": "true",
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",
            "com.docker.network.driver.mtu": "1500"
        },
        "Labels": {}
    }
]
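Individual fields can also be pulled out with the -f/--format Go-template option instead of reading the whole JSON document; a small sketch that follows the structure shown above:

docker network inspect -f '{{json .IPAM.Config}}' bridge
# prints roughly: [{"Subnet":"172.17.0.0/16","Gateway":"172.17.0.1"}]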
2. Bridge Mode
2.1 Creating a container
[root@cl-server ~]# docker run --name dk03 -it --network bridge -h dk03.kunking.com --rm busybox:latest
/ # hostname
dk03.kunking.com
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
18: eth0@if19: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
    link/ether 02:42:ac:11:00:03 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.3/16 brd 172.17.255.255 scope global eth0
       valid_lft forever preferred_lft forever
/ # cat /etc/hosts
127.0.0.1       localhost
::1     localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.3      dk03.kunking.com dk03

/ # cat /etc/resolv.conf          # inherited from the host's configuration
# Generated by NetworkManager
nameserver 114.114.114.114

/ # nslookup
BusyBox v1.32.1 (2021-04-07 19:47:14 UTC) multi-call binary.

Usage: nslookup [-type=QUERY_TYPE] [-debug] HOST [DNS_SERVER]

Query DNS about HOST
QUERY_TYPE: soa,ns,a,aaaa,cname,mx,txt,ptr,any
/ # nslookup -type=A www.baidu.com
Server:         114.114.114.114
Address:        114.114.114.114:53

Non-authoritative answer:
www.baidu.com   canonical name = www.a.shifen.com
Name:   www.a.shifen.com
Address: 180.101.49.11
Name:   www.a.shifen.com
Address: 180.101.49.12

[root@cl-server ~]# ip a
19: veth07d8b01@if18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
    link/ether 96:20:ad:26:46:09 brd ff:ff:ff:ff:ff:ff link-netnsid 3
    inet6 fe80::9420:adff:fe26:4609/64 scope link
       valid_lft forever preferred_lft forever
2.2 Port mapping
docker0 is a NAT bridge, so containers are assigned private network addresses. To expose a container, or a service running in it, to external networks, DNAT rules must be defined on the host (a concrete sketch follows the two rule templates below).
Map all traffic destined for a given host IP address to the container address:
- -A PREROUTING -d <hostIP> -j DNAT --to-destination <containerIP>
Map traffic destined for a specific port on a host IP address to a specific port on the container:
- -A PREROUTING -d <hostIP> -p {tcp|udp} --dport <hostPort> -j DNAT --to-destination <containerIP>:<containerPort>
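As a concrete illustration of the second template, a hedged sketch (the host IP 192.168.234.6 and container IP 172.17.0.2 are taken from examples elsewhere in these notes; adjust to your environment):

# map TCP port 8080 on the host to port 80 inside the container
iptables -t nat -A PREROUTING -d 192.168.234.6 -p tcp --dport 8080 -j DNAT --to-destination 172.17.0.2:80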
Using the -p option with docker run sets up the port mapping automatically; there is no need to add rules by hand.
- -p <containerPort>                  maps the container port to a dynamic port on all host IP addresses
- -p <hostPort>:<containerPort>       maps the container port to the specified host port
- -p <ip>::<containerPort>            maps the container port to a dynamic port on the specified host IP
- -p <ip>:<hostPort>:<containerPort>  maps the container port to the specified port on the specified host IP
[root@cl-server ~]# docker run --name myhttpd --rm -p 192.168.234.6::80 kunking/httpd:v0.1-2
[root@cl-server ~]# docker run --name myhttpd --rm -p 80:80 kunking/httpd:v0.1-2
[root@cl-server ~]# docker run --name myhttpd --rm -p 192.168.234.6:8080:80 kunking/httpd:v0.1-2

[root@component ~]# docker run -it --name myhttpd --rm -p 80 nginx
[root@component ~]# docker ps
CONTAINER ID   IMAGE     COMMAND                  CREATED          STATUS          PORTS                                     NAMES
83cd6bf7b254   nginx     "/docker-entrypoint.…"   22 seconds ago   Up 20 seconds   0.0.0.0:49153->80/tcp, :::49153->80/tcp   myhttpd
[root@component ~]# curl http://127.0.0.1:49153
[root@component ~]# iptables -nvL -t nat
Chain DOCKER (2 references)
 pkts bytes target     prot opt in       out     source               destination
    0     0 RETURN     all  --  docker0  *       0.0.0.0/0            0.0.0.0/0
    0     0 DNAT       tcp  --  !docker0 *       0.0.0.0/0            0.0.0.0/0            tcp dpt:49153 to:172.17.0.2:80
[root@component ~]# docker port myhttpd
80/tcp -> 0.0.0.0:49153
80/tcp -> :::49153
2.3 Other network options
Bridged containers
The "--hostname HOSTNAME" option assigns a hostname to the container:
docker run --rm --net bridge --hostname bbox.kunking.com busybox nslookup bbox.kunking.com
"--dns DNS_SERVR_IP" 选项为容器指定使用的dns服务器地址
docker run --rm --dns 192.168.6.1 busybox nslookup docker.com
"--add-host HOSTNAME:IP"选项为容器指定本地主机名解析项
docker run --rm --dns 192.168.6.1 --add-host "docker.com:192.168.6.100" busybox nslookup docker.com
[root@cl-server ~]# docker run --name dk04 -it --network bridge -h dk03.kunking.com --dns 8.8.8.8 --rm busybox:latest
/ # cat /etc/resolv.conf
nameserver 8.8.8.8

[root@cl-server ~]# docker run --name dk04 -it --network bridge -h dk03.kunking.com --dns-search ilinux.io --rm busybox:latest
/ # cat /etc/resolv.conf
search ilinux.io
nameserver 114.114.114.114

[root@cl-server ~]# docker run --name dk04 -it --network bridge -h dk03.kunking.com --dns-search ilinux.io --add-host www.kunking.com:6.6.6.6 --rm busybox:latest
/ # cat /etc/hosts
6.6.6.6 www.kunking.com
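The same DNS settings can also be applied daemon-wide through /etc/docker/daemon.json so that every new container inherits them; a minimal sketch (this overwrites the file, so merge by hand if it already has other keys):

cat > /etc/docker/daemon.json <<'EOF'
{
  "dns": ["114.114.114.114", "8.8.8.8"],
  "dns-search": ["ilinux.io"]
}
EOF
systemctl restart docker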
2.4 veth interface peers
Each bridged container's eth0 is one end of a veth pair; the other end is the vethXXXXXXX interface attached to docker0 on the host. The @ifN suffixes show the pairing, as the two listings below illustrate: eth0@if5 inside container b1 corresponds to veth89722c7@if4 on the host.
[root@component ~]# docker run -it --name b1 busybox:latest
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
4: eth0@if5: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
    link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
       valid_lft forever preferred_lft forever

[root@component ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:50:56:31:7c:c2 brd ff:ff:ff:ff:ff:ff
    inet 192.168.6.200/24 brd 192.168.6.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::8a81:e941:8c9c:b326/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:48:2f:23:85 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:48ff:fe2f:2385/64 scope link
       valid_lft forever preferred_lft forever
5: veth89722c7@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
    link/ether d6:9f:a2:05:3f:9a brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::d49f:a2ff:fe05:3f9a/64 scope link
       valid_lft forever preferred_lft forever
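A quick way to confirm which host-side veth pairs with a container's eth0 is to read the peer ifindex from sysfs inside the container and look it up on the host; a sketch using container b1 from above:

docker exec b1 cat /sys/class/net/eth0/iflink      # prints the peer ifindex, 5 here
ip -o link | awk -F': ' '$1 == 5 {print $2}'       # -> veth89722c7@if4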
3. Joined Containers
- A joined container is one that uses the network interfaces of an already existing container. The interfaces are shared by all containers in the group, so joined containers have no network isolation from one another.
- Joined containers share the same UTS, NET, and IPC namespaces; other namespaces such as User and Mount remain isolated.
- Joined containers can therefore collide on ports. This model is normally used only when programs in several containers need to talk to each other over the loopback interface, or when the network properties of an existing container need to be monitored.
[root@component ~]# docker run -itd --rm --name web1 -p 8088 busybox /bin/httpd -p 8088 -f
0040a4d3033a6b460d4d678e2b0c2af6a0596b93b9c2eeb3b4da58891273b4ab
[root@component ~]# docker ps
CONTAINER ID   IMAGE     COMMAND                  CREATED          STATUS          PORTS                                         NAMES
0040a4d3033a   busybox   "/bin/httpd -p 8088 …"   29 seconds ago   Up 28 seconds   0.0.0.0:49155->8088/tcp, :::49155->8088/tcp   web1
[root@component ~]# docker run -it --rm --name joined --net container:web1 busybox netstat -tan
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State
tcp        0      0 :::8088                 :::*                    LISTEN
3.1 Creating the containers
[root@cl-server ~]# docker run --name b2 -it --rm busybox
/ # ifconfig
eth0      Link encap:Ethernet  HWaddr 02:42:AC:11:00:02
          inet addr:172.17.0.2  Bcast:172.17.255.255  Mask:255.255.0.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:6 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:516 (516.0 B)  TX bytes:0 (0.0 B)

[root@cl-server ~]# docker run --name b3 --network container:b2 -it --rm busybox
/ # ifconfig
eth0      Link encap:Ethernet  HWaddr 02:42:AC:11:00:02
          inet addr:172.17.0.2  Bcast:172.17.255.255  Mask:255.255.0.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:8 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0
          RX bytes:656 (656.0 B)  TX bytes:0 (0.0 B)
3.2 File systems remain isolated
# In container b3:
/ # ls /tmp
/ # echo "hello, b3" > /tmp/index.html
/ # httpd -h /tmp
/ # netstat -tul
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State
tcp        0      0 :::80                   :::*                    LISTEN

# In container b2:
/ # mkdir /tmp/b2
/ # ls /tmp/
b2
/ # wget -O - -q 127.0.0.1
hello, b3
3.3 Container exit
# After container b2 exits first, container b3's network interface is gone as well.
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
4. Host Mode
[root@cl-server ~]# docker run --name b4 --network host -it --rm busybox
/ # ifconfig
docker0   Link encap:Ethernet  HWaddr 02:42:61:72:CA:97
          inet addr:172.17.0.1  Bcast:172.17.255.255  Mask:255.255.0.0

ens33     Link encap:Ethernet  HWaddr 00:50:56:2F:29:8D
          inet addr:192.168.234.6  Bcast:192.168.234.255  Mask:255.255.255.0

lo        Link encap:Local Loopback
          inet addr:127.0.0.1  Mask:255.0.0.0

/ # echo "hello, container" > /tmp/index.html
/ # httpd -h /tmp
/ # netstat -tul
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State
tcp        0      0 cl-server:6379          0.0.0.0:*               LISTEN
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN
tcp        0      0 :::3306                 :::*                    LISTEN
tcp        0      0 :::80                   :::*                    LISTEN
tcp        0      0 :::22                   :::*                    LISTEN
udp        0      0 localhost:323           0.0.0.0:*
udp        0      0 localhost:323           :::*

[root@cl-server ~]# netstat -tunlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp6       0      0 :::80                   :::*                    LISTEN      13327/httpd

Accessing port 80 on the host now reaches the httpd running inside the container.
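For example (a hedged sketch; 192.168.234.6 is the host address shown in the ifconfig output above):

curl http://192.168.234.6/
# hello, container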
5. None Mode (No Network Interface)
[root@component ~]# docker run --name dk02 -it --network none --rm busybox:latest
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
/ #
6. Manually Configuring a veth Peer Pair
6.1 Creating network namespaces
[root@cl-server ~]# ip netns help
Usage: ip netns list
       ip netns add NAME
       ip netns set NAME NETNSID
       ip [-all] netns delete [NAME]
       ip netns identify [PID]
       ip netns pids NAME
       ip [-all] netns exec [NAME] cmd ...
       ip netns monitor
       ip netns list-id
[root@cl-server ~]# ip netns add r1
[root@cl-server ~]# ip netns add r2
[root@cl-server ~]# ip netns list
r2
r1
[root@cl-server ~]# ip netns exec r1 ifconfig -a
lo: flags=8<LOOPBACK>  mtu 65536
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
[root@cl-server ~]# ip netns exec r2 ifconfig -a
lo: flags=8<LOOPBACK>  mtu 65536
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
6.2 Creating the interfaces and moving them into the namespaces
[root@cl-server ~]# ip link add name veth1.1 type veth peer name veth1.2
[root@cl-server ~]# ip link show
14: veth1.2@veth1.1: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether aa:ea:5c:e7:82:cb brd ff:ff:ff:ff:ff:ff
15: veth1.1@veth1.2: <BROADCAST,MULTICAST,M-DOWN> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether ba:85:8f:a9:5e:51 brd ff:ff:ff:ff:ff:ff
[root@cl-server ~]# ip link set dev veth1.2 netns r1
[root@cl-server ~]# ip link show          # veth1.2 is no longer visible in the default namespace
15: veth1.1@if14: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
    link/ether ba:85:8f:a9:5e:51 brd ff:ff:ff:ff:ff:ff link-netnsid 1
[root@cl-server ~]# ip netns exec r1 ifconfig -a
lo: flags=8<LOOPBACK>  mtu 65536
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

veth1.2: flags=4098<BROADCAST,MULTICAST>  mtu 1500
        ether aa:ea:5c:e7:82:cb  txqueuelen 1000  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
[root@cl-server ~]# ip netns exec r1 ip link set dev veth1.2 name eth0
[root@cl-server ~]# ip netns exec r1 ifconfig -a
eth0: flags=4098<BROADCAST,MULTICAST>  mtu 1500
        ether aa:ea:5c:e7:82:cb  txqueuelen 1000  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

lo: flags=8<LOOPBACK>  mtu 65536
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
6.3 Assigning IP addresses to the interfaces
[root@cl-server ~]# ip netns exec r1 ifconfig eth0 10.1.0.2/24 up
[root@cl-server ~]# ip link set dev veth1.1 netns r2
[root@cl-server ~]# ip netns exec r2 ifconfig veth1.1 10.1.0.3/24 up

[root@cl-server ~]# ip netns exec r2 ping 10.1.0.2
PING 10.1.0.2 (10.1.0.2) 56(84) bytes of data.
64 bytes from 10.1.0.2: icmp_seq=1 ttl=64 time=0.085 ms
64 bytes from 10.1.0.2: icmp_seq=2 ttl=64 time=0.039 ms
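When the experiment is finished, deleting the two namespaces also removes the veth pair that was moved into them (a small cleanup sketch):

ip netns delete r1
ip netns delete r2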
7. Specifying the Docker Bridge Address
7.1 Inspecting the original settings
[root@component ~]# docker network inspect bridge
[
    {
        "Name": "bridge",
        "Id": "f918fb2fe5ead9f4e71093d6469b12fdfe971029d31a30d0ba38f2640d0a3b49",
        "Created": "2021-11-06T09:08:24.626079433+08:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.17.0.0/16",
                    "Gateway": "172.17.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "873f4731307d4d7eca2cb5128567e8cbfa2ae30fce4aeeb64b0ce425fc517793": {
                "Name": "dc01",
                "EndpointID": "c2b32e5d4055b19d5a3d76ae9748ee76120c8e3bcb778cbe7a975370ed18e091",
                "MacAddress": "02:42:ac:11:00:02",
                "IPv4Address": "172.17.0.2/16",
                "IPv6Address": ""
            }
        },
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",
            "com.docker.network.bridge.enable_ip_masquerade": "true",
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",
            "com.docker.network.driver.mtu": "1500"
        },
        "Labels": {}
    }
]
7.2 Changing the bridge IP address and subnet
bip (bridge IP) specifies the IP address of the docker0 bridge itself and, with it, the subnet that containers are allocated addresses from.
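A complete minimal /etc/docker/daemon.json carrying just this key might look as follows (a sketch; merge it with any keys the file already contains):

{
  "bip": "10.0.1.1/24"
}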
[root@component ~]# vi /etc/docker/daemon.json
    "bip": "10.0.1.1/24"
[root@component ~]# systemctl daemon-reload
[root@component ~]# systemctl restart docker
[root@component ~]# docker network inspect bridge
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "10.0.1.0/24",
                    "Gateway": "10.0.1.1"
                }
            ]
        },
7.3 The container IP changes accordingly
[root@component _data]# docker exec -it dc01 /bin/sh
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
12: eth0@if13: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
    link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
       valid_lft forever preferred_lft forever
/ # exit
[root@component _data]# docker ps -a
CONTAINER ID   IMAGE     COMMAND   CREATED         STATUS                       PORTS     NAMES
873f4731307d   busybox   "sh"      9 minutes ago   Exited (137) 3 minutes ago             dc01
[root@component _data]# docker start dc01
dc01
[root@component _data]# docker exec -it dc01 /bin/sh
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
14: eth0@if15: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
    link/ether 02:42:0a:00:01:02 brd ff:ff:ff:ff:ff:ff
    inet 10.0.1.2/24 brd 10.0.1.255 scope global eth0
       valid_lft forever preferred_lft forever
8. Custom Bridges
8.1 Creating a custom bridge
[root@component ~]# brctl show
bridge name     bridge id               STP enabled     interfaces
docker0         8000.0242141eeebf       no

[root@component ~]# docker network create -d bridge --subnet "172.67.0.0/16" --gateway "172.67.0.1" mybr1
2506a661b1ef4e85c896515fe831f452865e5c1cf962bae221de6fad42bc9faf

[root@component ~]# docker network ls
NETWORK ID     NAME      DRIVER    SCOPE
312b1dc9eaac   bridge    bridge    local
b0d9dac9183d   host      host      local
2506a661b1ef   mybr1     bridge    local
dfb9ff737ae2   none      null      local
[root@component ~]# brctl show
bridge name         bridge id               STP enabled     interfaces
br-2506a661b1ef     8000.0242b672397d       no
docker0             8000.0242141eeebf       no
8.2 Running containers on different bridges
[root@component ~]# docker run --name mybr10 -it --net mybr1 --rm busybox
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
17: eth0@if18: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
    link/ether 02:42:ac:43:00:02 brd ff:ff:ff:ff:ff:ff
    inet 172.67.0.2/16 brd 172.67.255.255 scope global eth0
       valid_lft forever preferred_lft forever

[root@component ~]# docker run --name mybr12 -it --rm busybox
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
19: eth0@if20: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
    link/ether 02:42:0a:00:01:02 brd ff:ff:ff:ff:ff:ff
    inet 10.0.1.2/24 brd 10.0.1.255 scope global eth0
       valid_lft forever preferred_lft forever

[root@component ~]# brctl show
bridge name         bridge id               STP enabled     interfaces
br-2506a661b1ef     8000.0242b672397d       no              vethf8c97b1
docker0             8000.0242141eeebf       no              veth7f9bdea
8.3 Container network communication
/ # ping 172.67.0.1
PING 172.67.0.1 (172.67.0.1): 56 data bytes
64 bytes from 172.67.0.1: seq=0 ttl=64 time=0.663 ms
64 bytes from 172.67.0.1: seq=1 ttl=64 time=0.238 ms
^C
--- 172.67.0.1 ping statistics ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 0.211/0.308/0.663 ms
/ # ping 172.67.0.2
PING 172.67.0.2 (172.67.0.2): 56 data bytes
^C
--- 172.67.0.2 ping statistics ---
71 packets transmitted, 0 packets received, 100% packet loss

/ # ping 10.0.1.3
PING 10.0.1.3 (10.0.1.3): 56 data bytes
64 bytes from 10.0.1.3: seq=0 ttl=64 time=1.055 ms
64 bytes from 10.0.1.3: seq=1 ttl=64 time=0.626 ms
^C
--- 10.0.1.3 ping statistics ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 0.131/0.415/1.055 ms

/ # ping 192.168.6.1
PING 192.168.6.1 (192.168.6.1): 56 data bytes
64 bytes from 192.168.6.1: seq=0 ttl=63 time=1.535 ms
64 bytes from 192.168.6.1: seq=1 ttl=63 time=3.302 ms
^C
--- 192.168.6.1 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 0.954/1.930/3.302 ms

/ # ping www.baidu.com
PING www.baidu.com (14.215.177.39): 56 data bytes
64 bytes from 14.215.177.39: seq=0 ttl=49 time=43.405 ms
64 bytes from 14.215.177.39: seq=1 ttl=49 time=42.250 ms
^C
--- www.baidu.com ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 42.250/42.827/43.405 ms
/ #
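The failed ping to 172.67.0.2 shows that containers attached to different bridge networks are isolated from each other by default (Docker inserts inter-network isolation rules in iptables). If the two containers need to talk, one option is to attach the second container to the custom network as well; a hedged sketch using the names from above:

docker network connect mybr1 mybr12
docker exec mybr12 ip a     # now also shows an address in 172.67.0.0/16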
8.4 Host interfaces and iptables rules
[root@component ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:50:56:31:7c:c2 brd ff:ff:ff:ff:ff:ff
    inet 192.168.6.200/24 brd 192.168.6.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::8a81:e941:8c9c:b326/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:14:1e:ee:bf brd ff:ff:ff:ff:ff:ff
    inet 10.0.1.1/24 brd 10.0.1.255 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:14ff:fe1e:eebf/64 scope link
       valid_lft forever preferred_lft forever
16: br-2506a661b1ef: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:b6:72:39:7d brd ff:ff:ff:ff:ff:ff
    inet 172.67.0.1/16 brd 172.67.255.255 scope global br-2506a661b1ef
       valid_lft forever preferred_lft forever
    inet6 fe80::42:b6ff:fe72:397d/64 scope link
       valid_lft forever preferred_lft forever
18: vethf8c97b1@if17: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master br-2506a661b1ef state UP group default
    link/ether 6a:c3:4e:19:d2:57 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet6 fe80::68c3:4eff:fe19:d257/64 scope link
       valid_lft forever preferred_lft forever
20: veth7f9bdea@if19: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
    link/ether 8a:6e:7b:b1:72:0b brd ff:ff:ff:ff:ff:ff link-netnsid 1
    inet6 fe80::886e:7bff:feb1:720b/64 scope link
       valid_lft forever preferred_lft forever
[root@component ~]# iptables -nvL -t nat
Chain PREROUTING (policy ACCEPT 252 packets, 64353 bytes)
 pkts bytes target     prot opt in     out     source               destination
 1034  242K PREROUTING_direct  all  --  *      *       0.0.0.0/0            0.0.0.0/0
 1034  242K PREROUTING_ZONES_SOURCE  all  --  *      *       0.0.0.0/0            0.0.0.0/0
 1034  242K PREROUTING_ZONES  all  --  *      *       0.0.0.0/0            0.0.0.0/0
    0     0 DOCKER     all  --  *      *       0.0.0.0/0            0.0.0.0/0            ADDRTYPE match dst-type LOCAL

Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
 pkts bytes target     prot opt in     out     source               destination

Chain OUTPUT (policy ACCEPT 18 packets, 1368 bytes)
 pkts bytes target     prot opt in     out     source               destination
  177 13246 OUTPUT_direct  all  --  *      *       0.0.0.0/0            0.0.0.0/0
    0     0 DOCKER     all  --  *      *       0.0.0.0/0           !127.0.0.0/8          ADDRTYPE match dst-type LOCAL

Chain POSTROUTING (policy ACCEPT 18 packets, 1368 bytes)
 pkts bytes target     prot opt in     out     source               destination
    0     0 MASQUERADE  all  --  *      !br-2506a661b1ef  172.67.0.0/16        0.0.0.0/0
    0     0 MASQUERADE  all  --  *      !docker0  10.0.1.0/24          0.0.0.0/0
  177 13246 POSTROUTING_direct  all  --  *      *       0.0.0.0/0            0.0.0.0/0
  177 13246 POSTROUTING_ZONES_SOURCE  all  --  *      *       0.0.0.0/0            0.0.0.0/0
  177 13246 POSTROUTING_ZONES  all  --  *      *       0.0.0.0/0            0.0.0.0/0

Chain DOCKER (2 references)
 pkts bytes target     prot opt in     out     source               destination
    0     0 RETURN     all  --  br-2506a661b1ef  *       0.0.0.0/0            0.0.0.0/0
    0     0 RETURN     all  --  docker0  *       0.0.0.0/0            0.0.0.0/0

Chain OUTPUT_direct (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain POSTROUTING_ZONES (1 references)
 pkts bytes target     prot opt in     out     source               destination
    0     0 POST_docker  all  --  *      br-2506a661b1ef  0.0.0.0/0            0.0.0.0/0           [goto]
    0     0 POST_docker  all  --  *      docker0  0.0.0.0/0            0.0.0.0/0           [goto]
  177 13246 POST_public  all  --  *      ens33   0.0.0.0/0            0.0.0.0/0           [goto]
    0     0 POST_public  all  --  *      +       0.0.0.0/0            0.0.0.0/0           [goto]

Chain POSTROUTING_ZONES_SOURCE (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain POSTROUTING_direct (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain POST_docker (2 references)
 pkts bytes target     prot opt in     out     source               destination
    0     0 POST_docker_log  all  --  *      *       0.0.0.0/0            0.0.0.0/0
    0     0 POST_docker_deny  all  --  *      *       0.0.0.0/0            0.0.0.0/0
    0     0 POST_docker_allow  all  --  *      *       0.0.0.0/0            0.0.0.0/0

Chain POST_docker_allow (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain POST_docker_deny (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain POST_docker_log (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain POST_public (2 references)
 pkts bytes target     prot opt in     out     source               destination
  177 13246 POST_public_log  all  --  *      *       0.0.0.0/0            0.0.0.0/0
  177 13246 POST_public_deny  all  --  *      *       0.0.0.0/0            0.0.0.0/0
  177 13246 POST_public_allow  all  --  *      *       0.0.0.0/0            0.0.0.0/0

Chain POST_public_allow (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain POST_public_deny (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain POST_public_log (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain PREROUTING_ZONES (1 references)
 pkts bytes target     prot opt in     out     source               destination
    0     0 PRE_docker  all  --  br-2506a661b1ef  *       0.0.0.0/0            0.0.0.0/0           [goto]
    0     0 PRE_docker  all  --  docker0  *       0.0.0.0/0            0.0.0.0/0           [goto]
 1034  242K PRE_public  all  --  ens33  *       0.0.0.0/0            0.0.0.0/0           [goto]
    0     0 PRE_public  all  --  +      *       0.0.0.0/0            0.0.0.0/0           [goto]

Chain PREROUTING_ZONES_SOURCE (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain PREROUTING_direct (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain PRE_docker (2 references)
 pkts bytes target     prot opt in     out     source               destination
    0     0 PRE_docker_log  all  --  *      *       0.0.0.0/0            0.0.0.0/0
    0     0 PRE_docker_deny  all  --  *      *       0.0.0.0/0            0.0.0.0/0
    0     0 PRE_docker_allow  all  --  *      *       0.0.0.0/0            0.0.0.0/0

Chain PRE_docker_allow (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain PRE_docker_deny (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain PRE_docker_log (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain PRE_public (2 references)
 pkts bytes target     prot opt in     out     source               destination
 1034  242K PRE_public_log  all  --  *      *       0.0.0.0/0            0.0.0.0/0
 1034  242K PRE_public_deny  all  --  *      *       0.0.0.0/0            0.0.0.0/0
 1034  242K PRE_public_allow  all  --  *      *       0.0.0.0/0            0.0.0.0/0

Chain PRE_public_allow (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain PRE_public_deny (1 references)
 pkts bytes target     prot opt in     out     source               destination

Chain PRE_public_log (1 references)
 pkts bytes target     prot opt in     out     source               destination
[root@component ~]#
8.5 Error case: renaming the custom bridge device
[root@cl-server docker]# ip link set dev br-4e1bb0f17677 name mybr0
RTNETLINK answers: Device or resource busy

[root@cl-server docker]# ip link set dev br-4e1bb0f17677 down
[root@cl-server docker]# ip link set dev br-4e1bb0f17677 name mybr0
[root@cl-server docker]# ip link set dev mybr0 up

[root@cl-server docker]# docker run --name sd1 -it --net mybr0 busybox:latest
docker: Error response from daemon: failed to create endpoint sd1 on network mybr0: adding interface veth9b55094 to bridge br-4e1bb0f17677 failed: could not find bridge br-4e1bb0f17677: route ip+net: no such network interface.
ERRO[0000] error waiting for container: context canceled
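Docker still looks the bridge up by the device name it generated (br-4e1bb0f17677), so renaming the device underneath it breaks the network. If a friendlier bridge device name is wanted, set it through the bridge driver option when the network is created instead of renaming it afterwards; a sketch (the subnet here is only illustrative):

docker network create -d bridge \
  -o com.docker.network.bridge.name=mybr0 \
  --subnet 172.68.0.0/16 mybr0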
8.6 Host IP forwarding
[root@cl-server ~]# cat /proc/sys/net/ipv4/ip_forward
1
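IP forwarding must remain enabled for bridged containers to reach external networks; Docker normally turns it on itself. To pin the setting across reboots it can also be made persistent (a small sketch):

echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
sysctl -p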