
A simple corosync + pacemaker + crmsh setup

# System: CentOS 7.2; all packages are installed from the Yum repos

node1: 192.168.8.111

node2: 192.168.8.112

vip:   192.168.8.200

nfs:   192.168.8.113
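# Both nodes resolve each other by hostname; a minimal /etc/hosts sketch for both machines (an assumption, the post never shows this file; note the hostname must NOT appear on the 127.0.0.1 line, see the ring-status check further down):

127.0.0.1     localhost localhost.localdomain

192.168.8.111 node1

192.168.8.112 node2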

# Set up SSH mutual trust between the nodes

~]# ssh-keygen

~]# cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys

~]# chmod go= .ssh/authorized_keys

~]# scp -p .ssh/id_rsa .ssh/authorized_keys node1:/root/.ssh/
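# A quick sanity check that the trust works from either node (sketch):

~]# ssh node1 hostname; ssh node2 hostname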

# Install pcs

~]# ansible ha -m yum -a "name=pcs state=installed"
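# The ansible commands assume an inventory group named "ha" containing both nodes; a minimal sketch of /etc/ansible/hosts (the group name comes from this post, the path is the Ansible default):

[ha]

node1

node2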

# Synchronize time on both nodes

~]# ntpdate 192.168.9.19; ssh node2 ntpdate 192.168.9.19

# Start and enable the pcsd service

~]# ansible ha -m service -a "name=pcsd state=started enabled=yes"

# Set the hacluster user's password on both nodes

~]# ansible ha -m shell -a 'echo "xiong" | passwd --stdin hacluster' 

# Authenticate the cluster nodes

~]# pcs cluster auth node1 node2

Username: hacluster

Password: 

node1: Authorized

node2: Authorized

# Create the cluster from the two nodes; --name sets the cluster name, followed by the member node list

~]# pcs cluster setup --name myha node1 node2 

Shutting down pacemaker/corosync services...

Redirecting to /bin/systemctl stop  pacemaker.service

Redirecting to /bin/systemctl stop  corosync.service

Killing any remaining services...

Removing all cluster configuration files...

node1: Succeeded

node2: Succeeded

Synchronizing pcsd certificates on nodes node1, node2...

node1: Success

node2: Success

Restarting pcsd on the nodes in order to reload the certificates...

# Once the cluster is set up, the corosync configuration file is generated

~]# ls /etc/corosync/

corosync.conf  corosync.conf.example  corosync.conf.example.udpu  corosync.xml.example  uidgid.d
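# Roughly what the generated /etc/corosync/corosync.conf contains (a sketch; the file pcs actually writes on your nodes is authoritative):

totem {
    version: 2
    secauth: off
    cluster_name: myha
    transport: udpu
}

nodelist {
    node {
        ring0_addr: node1
        nodeid: 1
    }

    node {
        ring0_addr: node2
        nodeid: 2
    }
}

quorum {
    provider: corosync_votequorum
    two_node: 1
}

logging {
    to_logfile: yes
    logfile: /var/log/cluster/corosync.log
}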

# Start the cluster (corosync + pacemaker) on all nodes

~]# pcs cluster start --all

node2: Starting Cluster...

node1: Starting Cluster...

# Follow the cluster log

~]# tail -f /var/log/cluster/corosync.log

# Check that corosync communication is healthy

~]# corosync-cfgtool -s

Printing ring status.

Local node ID 1

RING ID 0

id= 127.0.0.1 # If the id shows 127.0.0.1 the cluster is broken: edit /etc/hosts, remove the hostname from the 127.0.0.1 line and keep only the default entry

status= ring 0 active with no faults

# Current DC: the designated coordinator, as shown in the status output

# Yum repository for crmsh

[network_ha-clustering_Stable]

name=Stable High Availability/Clustering packages (CentOS_CentOS-7)

type=rpm-md

baseurl=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7/

gpgcheck=1

gpgkey=http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-7//repodata/repomd.xml.key

enabled=1

# Install crmsh and pssh; only the node you administer the cluster from needs them

~]# yum -y install crmsh pssh

# NFS configuration (on the nfs host, 192.168.8.113)

~]# ansible ha -m yum -a "name=nfs-utils state=installed"

~]# vim /etc/exports

/opt/pages      192.168.8.0/24(rw)
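# The post does not show starting the NFS server itself; a sketch for the nfs host, assuming /opt/pages already holds the pages to serve:

~]# mkdir -p /opt/pages

~]# systemctl start nfs-server && systemctl enable nfs-server

~]# exportfs -arv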

# Install nginx and verify that the NFS mount works

~]# ansible ha -m yum -a "name=nginx state=installed"

~]# ansible ha -m shell -a "mount -t nfs 192.168.8.113:/opt/pages /usr/share/nginx/html/"

~]# ansible ha -m shell -a  "systemctl start nginx"

~]# ansible ha -m shell -a  "umount /usr/share/nginx/html"

~]# ansible ha -m shell -a  "df -Th"

# For every change, run verify first; only run commit once everything checks out
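# A sketch of that workflow in crmsh: enter the configure context, make changes, then verify and commit (show prints the current configuration):

~]# crm configure

crm(live)configure# verify

crm(live)configure# commit

crm(live)configure# show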

# Define the virtual IP resource

crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip="192.168.8.200"

# Define the nginx service resource

crm(live)configure# primitive vipservice systemd:nginx op monitor interval=30s timeout=20s

# Define the NFS filesystem resource

crm(live)configure# primitive vipnfs ocf:heartbeat:Filesystem params device="192.168.8.113:/opt/pages" directory="/usr/share/nginx/html/" fstype="nfs" op start timeout=60s op stop timeout=60s op monitor timeout=40s interval=20s

# Define a colocation constraint. Read A ( B C ) as A-with-B and A-with-C: vipservice must run together with both vip and vipnfs

crm(live)configure# colocation vipservice_with_vip_and_vipnfs inf: vipservice ( vip vipnfs )

# Define ordering constraints: mandatory, vip starts before vipnfs

crm(live)configure# order vipnfs_after_vip  Mandatory: vip vipnfs

# Mandatory: vipnfs starts before vipservice

crm(live)configure# order vipservice_after_vipnfs Mandatory: vipnfs vipservice
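# An equivalent shortcut not used in this post: a crmsh group implies both the colocation and the start order of its members:

crm(live)configure# group webservice vip vipnfs vipservice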

# Check that the resources are up

crm(live)# status

Last updated: Thu May 18 16:00:41 2017

Last change: Thu May 18 16:00:36 2017 by root via cibadmin on node1

Stack: corosync

Current DC: node2 (version 1.1.13-10.el7-44eb2dd) - partition with quorum

2 nodes and 3 resources configured

Online: [ node1 node2 ]

Full list of resources:

 vip (ocf::heartbeat:IPaddr2): Started node1

 vipservice (systemd:nginx): Started node1

 vipnfs (ocf::heartbeat:Filesystem): Started node1

# Quit crmsh

# On the active node run ss -tnl | grep 80, df -Th and ip addr show; once everything checks out, force node1 into standby and verify the resources move to node2 (see the sketch below)
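# A sketch of the failover test, using standard crmsh node commands:

crm(live)# node standby node1

crm(live)# status

crm(live)# node online node1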

# Set a location constraint with score 100 so that vip prefers node1

crm(live)configure# location node1_vip vip 100: node1
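# Note: with the default resource-stickiness of 0, vip moves back to node1 as soon as node1 returns; to keep resources where they are, a stickiness higher than the location score can be set (a sketch using crmsh resource defaults):

crm(live)configure# rsc_defaults resource-stickiness=200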

# Common errors

# Node node1: UNCLEAN (offline). Run corosync-cfgtool -s and check whether the ring IP is 127.0.0.1; if it is, remove the hostname from the 127.0.0.1 line in /etc/hosts

[root@node2 ~]# pcs status

Cluster name: myha

WARNING: no stonith devices and stonith-enabled is not false

Last updated: Wed May 17 15:34:53 2017

Last change: Wed May 17 15:31:50 2017 by hacluster via crmd on node2

Current DC: node2 (version 1.1.13-10.el7-44eb2dd) - partition WITHOUT quorum

2 nodes and 0 resources configured

Node node1: UNCLEAN (offline)

Online: [ node2 ]

# WARNING: no stonith devices and stonith-enabled is not false

Fix: pcs property set stonith-enabled=false
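# The same property can be set from crmsh (equivalent sketch):

crm(live)configure# property stonith-enabled=false

crm(live)configure# commit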
