1.docker单机容器互通
# 单机容器,busybox自带很多常用命令,这里采用这个容器做测试
1# docker run -it --rm busybox
2# docker run -it --rm busybox
# 进入容器后发现2个ip分别为172.17.0.2和172.17.0.3,互ping是可以通的
# 采取主机名别名的方式实现容器互通
1# docker run -it --name busybox1 busybox
2# docker run -it --link busybox1:busybox1(别名) --name busybox2 busybox
# 查看容器2的hosts文件,然后查看下容器1的ip发现和hosts文件里一致
172.17.0.2 busybox1 e9dd2c37885f
172.17.0.3 93e6abefe9a2
# 利用docker网络模式,默认3种模式
# 如果采用host模式,网卡信息会和宿主机一致
[root@ ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
6b3e660206d2 bridge bridge local
44dabd94864d host host local
cce7c0793987 none null local
# 指定host模式
1# docker run -it --net=host nginx
2# docker run -it --net=host nginx
#容器2会提示端口已被占用
# host模式优点:能充分利用网卡的性能,性能最高;缺点:容易对本地端口进行占用,从而导致只能开一个容器这种情况;除非对网络要求很高的情况下才会取用这种模式,否则很少使用这种方式
2.docker网络说明
# 创建属于自己的网络,可以看到容器的网络从之前172.17变为172.18网段了
[root@ ~]# docker network create --driver bridge my_net #返回字符串就说明成功了
f599f081c34922ffaecfc707581bd2db42f2c616c3095e5a6ffa9f78dc96e46a
[root@zabbix ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
6b3e660206d2 bridge bridge local
44dabd94864d host host local
f599f081c349 my_net bridge local
cce7c0793987 none null local
[root@ ~]# docker run --rm -it --network=my_net busybox
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
13: eth0@if14: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.2/16 brd 172.18.255.255 scope global eth0
valid_lft forever preferred_lft forever
# 创建指定的网络段,--driver 指定驱动
[root@ ~]# docker network create --driver bridge --subnet 172.22.16.0/24 --gateway 172.22.16.1 my_net2
65423c9870bf1b6ec5aaa4eccc50b667aafa53c2fb34365371a2bd59910f7718
#指定创建容器的ip, --network指定网络 --ip指定IP
[root@ ~]# docker run -it --rm --network=my_net2 --ip 172.22.16.88 busybox
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
16: eth0@if17: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:16:10:58 brd ff:ff:ff:ff:ff:ff
inet 172.22.16.88/24 brd 172.22.16.255 scope global eth0
valid_lft forever preferred_lft forever
# 在容器内互ping cxiong和cxiong2可以发现互通,容器名互ping只对用户自定义的网络有效,默认网络无效,ping 172.17.0.1也是可以通的(注意cxiong这种名字在容器创建前不能ping通)
1# docker run -it --rm --network=my_net2 --ip 172.22.16.88 --name cxiong busybox
2# docker run -it --rm --network=my_net2 --ip 172.22.16.99 --name cxiong2 busybox
# 外网查看可以发现新增了网络段
[root@ ~]# ifconfig
br-65423c9870bf: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.22.16.1 netmask 255.255.255.0 broadcast 172.22.16.255
inet6 fe80::42:9eff:fef7:53eb prefixlen 64 scopeid 0x20<link>
ether 02:42:9e:f7:53:eb txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
br-f599f081c349: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.18.0.1 netmask 255.255.0.0 broadcast 172.18.255.255
inet6 fe80::42:35ff:fe96:843a prefixlen 64 scopeid 0x20<link>
ether 02:42:35:96:84:3a txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.17.0.1 netmask 255.255.0.0 broadcast 172.17.255.255
inet6 fe80::42:a6ff:fe51:18b2 prefixlen 64 scopeid 0x20<link>
ether 02:42:a6:51:18:b2 txqueuelen 0 (Ethernet)
RX packets 6 bytes 336 (336.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 6 bytes 488 (488.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
# 自定义网络和默认网络原则上是不通的,如果需要ping通,采用docker network connect方式处理,默认不同网络的容器之间是通过防火墙(iptables)隔离的
1# docker run -it --rm --network=my_net2 --ip 172.22.16.88 --name cxiong busybox
2# docker run -it --rm --name xc busybox
# 这样就可以让172.17中的容器可以和指定my_net2的网络互通了
[root@ ~]# docker network connect my_net2 xc
# 这时候可以看下容器2:xc的网络,可以发现多了一条网卡
32: eth1@if33: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:16:10:02 brd ff:ff:ff:ff:ff:ff
inet 172.22.16.2/24 brd 172.22.16.255 scope global eth1
valid_lft forever preferred_lft forever
3.docker 跨主机互通
环境:
master:192.168.0.110
node1:192.168.0.111
node2:192.168.0.112
node2:
docker pull progrium/consul
docker run -d -p 8500:8500 --name consul progrium/consul -server -bootstrap
[root@~]# netstat -ntlp | grep 8500
tcp 0 0 0.0.0.0:8500 0.0.0.0:* LISTEN 3916/docker-proxy
tcp6 0 0 :::8500 :::* LISTEN 3920/docker-proxy
#浏览器访问:http://192.168.0.112:8500可以看到页面
# node2机器
[root@node2 ~]# dockerd --help| grep cluster
--cluster-advertise string Address or interface name to advertise
--cluster-store string URL of the distributed storage backend
--cluster-store-opt map Set cluster store options (default map[])
# 修改master和node1中的daemon.json
[root@Master ~]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://pft7f97f.mirror.aliyuncs.com"],
"cluster-store":"consul://192.168.0.112:8500",
"cluster-advertise":"192.168.0.110:2375"
}
# 重新加载文件
systemctl daemon-reload
systemctl restart docker
# 可以在http://192.168.0.112:8500页面KEY/VALUE按钮中的docker/nodes中可以看见刚添加的2台服务器IP
http://192.168.0.112:8500/ui/#/dc1/kv/docker/nodes/
# master:
[root@Master ~]# docker network create -d overlay ov_net1
cbc71dc4f500d2696b7d09f0746391bfaa6c33acfb24403665167d261b46381f
# 在master和node1上查看都可以发现 ov_net1这个全局网络
[root@node1 docker]# docker network ls
NETWORK ID NAME DRIVER SCOPE
9a39eae301f8 bridge bridge local
5402b01d020a host host local
bad1177e933d none null local
cbc71dc4f500 ov_net1 overlay global
# 在master和node1上创建容器,可以看到ip都有一个eth1@if19:172.18.0.2,以及各自的IP分别为:10.0.0.2和10.0.0.3,并且都可以ping通
[root@Master ~]# docker run -it --rm --network ov_net1 busybox
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
10: eth0@if11: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
link/ether 02:42:0a:00:00:02 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.2/24 brd 10.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
13: eth1@if14: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.2/16 brd 172.18.255.255 scope global eth1
valid_lft forever preferred_lft forever
[root@Master ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
7e7b59735ffb bridge bridge local
85e168138e1d docker_gwbridge bridge local
ea7935cbd3d8 host host local
9851abfa9ad5 none null local
cbc71dc4f500 ov_net1 overlay global
# docker_gwbridge 保证容器可以互通
/ # route -n
也可以指定网段进行创建
docker network create -d overlay --subnet 10.10.0.0/16 ov_net2
# 服务端
docker run -p 5601:5601 -p 9200:9200 -p 5044:5044 -it --rm --name elk sebp/elk
# 访问http://192.168.0.112:9200/_search?pretty
mkdir /fluentd_log
docker run -d -p 24224:24224 -p 24224:24224/udp -v /fluentd_log:/fluentd/log fluent/fluentd
# 客户端安装nginx镜像
# 客户端安装filebeat,修改filebeat配置文件
vim /etc/filebeat/filebeat.yml
# Filebeat inputs
- type: log
# Change to true to enable this input configuration.
enabled: true # 启用
# Paths that should be crawled and fetched. Glob based paths.
paths:
#- /var/log/*.log
- /var/lib/docker/containers/*/*.log # 镜像日志放置位置
# 客户端
docker run -it -p 80:80 --name nginx nginx
# 访问nginx,可以看到
未完成:elk+filebeat收集nginx日志