1. Docker single-host container interconnection
# Single-host containers; busybox bundles many common commands, so it is the container used throughout these examples
1# docker run -it --rm busybox
2# docker run -it --rm busybox
# Inside the containers the two IPs are 172.17.0.2 and 172.17.0.3, and they can ping each other
# Use a hostname alias (--link) to interconnect containers by name
1# docker run -it --name busybox1 busybox
2# docker run -it --link busybox1:busybox1 --name busybox2 busybox   # format: --link <container>:<alias>
# Check container 2's /etc/hosts file, then check container 1's IP: it matches the hosts entry (quick check after the output below)
172.17.0.2 busybox1 e9dd2c37885f
172.17.0.3 93e6abefe9a2
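# A minimal check, assuming both containers are still running: inside busybox2 the alias injected by --link should resolve
/ # ping -c 2 busybox1     # expected to answer from 172.17.0.2, matching the hosts entry above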
# Docker network modes: three exist by default (bridge, host, none)
# With host mode, the container's network interfaces are identical to the host's
[root@ ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
6b3e660206d2 bridge bridge local
44dabd94864d host host local
cce7c0793987 none null local
# Specify host mode
1# docker run -it --net=host nginx
2# docker run -it --net=host nginx
# Container 2 reports that the port is already in use
# Host mode pros: it uses the host NIC directly, so network performance is the best. Cons: it occupies host ports, so only one such container per port can run. It is rarely used unless the network performance requirements are very high.
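# A rough way to see the conflict (container name web1 is only illustrative; assumes nginx listens on port 80):
[root@ ~]# docker run -d --net=host --name web1 nginx
[root@ ~]# netstat -ntlp | grep ':80 '     # port 80 is held on the host itself, so a second host-mode nginx cannot bind it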
2. Docker networks
# Create your own network; containers attached to it move from the 172.17.x.x range to the 172.18.x.x range
[root@ ~]# docker network create --driver bridge my_net # a returned ID string means it succeeded
f599f081c34922ffaecfc707581bd2db42f2c616c3095e5a6ffa9f78dc96e46a
[root@zabbix ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
6b3e660206d2 bridge bridge local
44dabd94864d host host local
f599f081c349 my_net bridge local
cce7c0793987 none null local
[root@ ~]# docker run --rm -it --network=my_net busybox
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
13: eth0@if14: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.2/16 brd 172.18.255.255 scope global eth0
valid_lft forever preferred_lft forever
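# To confirm which subnet my_net was given, a sketch (the exact JSON layout varies by Docker version, but Subnet/Gateway appear under IPAM):
[root@ ~]# docker network inspect my_net | grep -iE 'subnet|gateway'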
# Create a network with a specific subnet; --driver selects the driver
[root@ ~]# docker network create --driver bridge --subnet 172.22.16.0/24 --gateway 172.22.16.1 my_net2
65423c9870bf1b6ec5aaa4eccc50b667aafa53c2fb34365371a2bd59910f7718
# Assign a fixed IP when creating the container: --network selects the network, --ip sets the address
[root@ ~]# docker run -it --rm --network=my_net2 --ip 172.22.16.88 busybox
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
16: eth0@if17: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:16:10:58 brd ff:ff:ff:ff:ff:ff
inet 172.22.16.88/24 brd 172.22.16.255 scope global eth0
valid_lft forever preferred_lft forever
# Inside the containers, cxiong and cxiong2 can ping each other by name; this name resolution only works on user-defined networks, not on the default bridge. Pinging 172.17.0.1 also works. (Note that a name like cxiong cannot be pinged before the container exists.) A quick check follows the two commands below.
1# docker run -it --rm --network=my_net2 --ip 172.22.16.88 --name cxiong busybox
2# docker run -it --rm --network=my_net2 --ip 172.22.16.99 --name cxiong2 busybox
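# A minimal sketch of the name test, assuming both containers above are running: inside cxiong
/ # ping -c 2 cxiong2      # resolved by the embedded DNS of the user-defined network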
# On the host, ifconfig shows a new bridge for the added subnet
[root@ ~]# ifconfig
br-65423c9870bf: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.22.16.1 netmask 255.255.255.0 broadcast 172.22.16.255
inet6 fe80::42:9eff:fef7:53eb prefixlen 64 scopeid 0x20<link>
ether 02:42:9e:f7:53:eb txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
br-f599f081c349: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.18.0.1 netmask 255.255.0.0 broadcast 172.18.255.255
inet6 fe80::42:35ff:fe96:843a prefixlen 64 scopeid 0x20<link>
ether 02:42:35:96:84:3a txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.17.0.1 netmask 255.255.0.0 broadcast 172.17.255.255
inet6 fe80::42:a6ff:fe51:18b2 prefixlen 64 scopeid 0x20<link>
ether 02:42:a6:51:18:b2 txqueuelen 0 (Ethernet)
RX packets 6 bytes 336 (336.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 6 bytes 488 (488.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
# A user-defined network and the default bridge network are isolated from each other by default (Docker separates them with firewall rules). To make them reachable, use docker network connect.
1# docker run -it --rm --network=my_net2 --ip 172.22.16.88 --name cxiong busybox
2# docker run -it --rm --name xc busybox
# This lets the container on the 172.17.x.x default bridge communicate with the containers on my_net2
[root@ ~]# docker network connect my_net2 xc
# Now look at container xc's interfaces again; an extra NIC has appeared
32: eth1@if33: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:16:10:02 brd ff:ff:ff:ff:ff:ff
inet 172.22.16.2/24 brd 172.22.16.255 scope global eth1
valid_lft forever preferred_lft forever
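# A minimal check after the connect, run inside xc (assuming cxiong is still up on my_net2):
/ # ping -c 2 172.22.16.88     # xc reaches cxiong through its new eth1
/ # ping -c 2 cxiong           # name resolution should also work now that both sit on my_net2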
3. Docker cross-host interconnection
Environment:
master:192.168.0.110
node1:192.168.0.111
node2:192.168.0.112
node2:
docker pull progrium/consul
docker run -d -p 8500:8500 --name consul progrium/consul -server -bootstrap
[root@~]# netstat -ntlp | grep 8500
tcp 0 0 0.0.0.0:8500 0.0.0.0:* LISTEN 3916/docker-proxy
tcp6 0 0 :::8500 :::* LISTEN 3920/docker-proxy
# Open http://192.168.0.112:8500 in a browser to see the Consul UI
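# The same check from the command line, a sketch using Consul's standard HTTP API:
[root@node2 ~]# curl -s http://192.168.0.112:8500/v1/catalog/nodes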
# On node2, check the cluster options that dockerd supports
[root@node2 ~]# dockerd --help| grep cluster
--cluster-advertise string Address or interface name to advertise
--cluster-store string URL of the distributed storage backend
--cluster-store-opt map Set cluster store options (default map[])
# Edit daemon.json on both master and node1; master's file is shown here, a sketch of node1's follows it
[root@Master ~]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://pft7f97f.mirror.aliyuncs.com"],
"cluster-store":"consul://192.168.0.112:8500",
"cluster-advertise":"192.168.0.110:2375"
}
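# node1's daemon.json should be identical except that cluster-advertise points at its own address
# (a sketch, assuming node1 keeps the same registry mirror):
[root@node1 ~]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://pft7f97f.mirror.aliyuncs.com"],
"cluster-store":"consul://192.168.0.112:8500",
"cluster-advertise":"192.168.0.111:2375"
}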
# Reload the unit files and restart Docker
systemctl daemon-reload
systemctl restart docker
# In the Consul UI at http://192.168.0.112:8500, under KEY/VALUE -> docker/nodes, the IPs of the two newly registered hosts are visible
http://192.168.0.112:8500/ui/#/dc1/kv/docker/nodes/
# master:
[root@Master ~]# docker network create -d overlay ov_net1
cbc71dc4f500d2696b7d09f0746391bfaa6c33acfb24403665167d261b46381f
# On both master and node1, docker network ls now shows the global ov_net1 network
[root@node1 docker]# docker network ls
NETWORK ID NAME DRIVER SCOPE
9a39eae301f8 bridge bridge local
5402b01d020a host host local
bad1177e933d none null local
cbc71dc4f500 ov_net1 overlay global
# Create a container on master and on node1: each gets an eth1 at 172.18.0.2 in addition to its overlay address (10.0.0.2 and 10.0.0.3 respectively), and the two can ping each other
[root@Master ~]# docker run -it --rm --network ov_net1 busybox
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
10: eth0@if11: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1450 qdisc noqueue
link/ether 02:42:0a:00:00:02 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.2/24 brd 10.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
13: eth1@if14: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue
link/ether 02:42:ac:12:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.2/16 brd 172.18.255.255 scope global eth1
valid_lft forever preferred_lft forever
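# A minimal cross-host test from the master container, assuming the node1 container received 10.0.0.3:
/ # ping -c 2 10.0.0.3        # traffic goes over the ov_net1 overlay between the two hosts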
[root@Master ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
7e7b59735ffb bridge bridge local
85e168138e1d docker_gwbridge bridge local
ea7935cbd3d8 host host local
9851abfa9ad5 none null local
cbc71dc4f500 ov_net1 overlay global
# docker_gwbridge is created automatically and provides the containers' connectivity outside the overlay
/ # route -n
# You can also create the overlay network with a specific subnet
docker network create -d overlay --subnet 10.10.0.0/16 ov_net2
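# A quick sanity check, as a sketch: a container attached to ov_net2 should get an address in 10.10.0.0/16
docker run -it --rm --network ov_net2 busybox ip a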
# Server side (ELK stack)
docker run -p 5601:5601 -p 9200:9200 -p 5044:5044 -it --rm --name elk sebp/elk
# Visit http://192.168.0.112:9200/_search?pretty to confirm Elasticsearch is responding
mkdir /fluentd_log
docker run -d -p 24224:24224 -p 24224:24224/udp -v /fluentd_log:/fluentd/log fluent/fluentd
# On the client, pull the nginx image
# On the client, install filebeat and edit its configuration file
vim /etc/filebeat/filebeat.yml
# Filebeat inputs
filebeat.inputs:
- type: log
  # Change to true to enable this input configuration.
  enabled: true
  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    #- /var/log/*.log
    - /var/lib/docker/containers/*/*.log   # where Docker writes the container json log files
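# The inputs above only read the container log files; filebeat also needs an output section pointing at the
# ELK container's beats port (a sketch, assuming sebp/elk's default logstash beats input on 5044):
output.logstash:
  hosts: ["192.168.0.112:5044"]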
# Client side
docker run -it -p 80:80 --name nginx nginx
# Access nginx a few times to generate access-log entries (quick sketch below)
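# A quick way to generate some traffic, as a sketch run on the client host (nginx is published on port 80):
curl -s http://localhost/ > /dev/null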
TODO (unfinished): collect the nginx logs with ELK + filebeat