1、ceph -s 檢視叢集狀态
[root@admin-node ~]# ceph -s
cluster 99f00338-a334-4f90-a579-496a934f25c0
health HEALTH_WARN
109 pgs degraded
9 pgs recovering
96 pgs recovery_wait
109 pgs stuck unclean
recovery 38892/105044 objects degraded (37.024%)
monmap e1: 1 mons at {admin-node=192.168.13.171:6789/0}
election epoch 3, quorum 0 admin-node
osdmap e146: 20 osds: 20 up, 20 in
flags sortbitwise
pgmap v6669: 320 pgs, 3 pools, 205 GB data, 52522 objects
424 GB used, 74059 GB / 74484 GB avail
38892/105044 objects degraded (37.024%)
211 active+clean
96 active+recovery_wait+degraded
9 active+recovering+degraded
4 active+degraded
recovery io 95113 kB/s, 23 objects/s
client io 215 MB/s rd, 53 op/s rd, 0 op/s wr
2、ceph health 檢視叢集狀态
[root@admin-node ~]# ceph health
HEALTH_WARN 104 pgs degraded; 7 pgs recovering; 93 pgs recovery_wait; 104 pgs stuck unclean; recovery 36306/105044 objects degraded (34.563%)
3、ceph osd tree 檢查OSD的CRUSH map
[root@admin-node ~]# ceph osd tree
ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 20.00000 root default
-2 10.00000 host node2
0 1.00000 osd.0 up 1.00000 1.00000
1 1.00000 osd.1 up 1.00000 1.00000
2 1.00000 osd.2 up 1.00000 1.00000
3 1.00000 osd.3 up 1.00000 1.00000
4 1.00000 osd.4 up 1.00000 1.00000
5 1.00000 osd.5 up 1.00000 1.00000
6 1.00000 osd.6 up 1.00000 1.00000
7 1.00000 osd.7 up 1.00000 1.00000
8 1.00000 osd.8 up 1.00000 1.00000
9 1.00000 osd.9 up 1.00000 1.00000
-3 10.00000 host node3
12 1.00000 osd.12 up 1.00000 1.00000
13 1.00000 osd.13 up 1.00000 1.00000
14 1.00000 osd.14 up 1.00000 1.00000
15 1.00000 osd.15 up 1.00000 1.00000
16 1.00000 osd.16 up 1.00000 1.00000
17 1.00000 osd.17 up 1.00000 1.00000
18 1.00000 osd.18 up 1.00000 1.00000
19 1.00000 osd.19 up 1.00000 1.00000
20 1.00000 osd.20 up 1.00000 1.00000
21 1.00000 osd.21 up 1.00000 1.00000
[root@admin-node ~]#
4、ceph df 檢視叢集使用情況
[root@admin-node ~]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
74484G 74060G 423G 0.57
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
rbd 0 6168M 0.01 36535G 1542
pool1 1 89872M 0.18 36535G 22469
pool2 2 111G 0.21 36535G 28510
5、檢視ceph存儲池
[root@node3 bin]# ceph osd lspools
<rados.Rados object at 0x100166a7258>
('mon', '')
0 rbd,1 pool1,2 pool2,
6、檢視ceph monitor仲裁情況
[root@node3 ~]# ceph quorum_status --format json-pretty
stat auth
end auth
<rados.Rados object at 0x10026787258>
{
"election_epoch": 4,
"quorum": [
],
"quorum_names": [
"admin-node"
"quorum_leader_name": "admin-node",
"monmap": {
"epoch": 1,
"fsid": "99f00338-a334-4f90-a579-496a934f25c0",
"modified": "2016-09-22 04:34:19.863934",
"created": "2016-09-22 04:34:19.863934",
"mons": [
{
"rank": 0,
"name": "admin-node",
"addr": "192.168.13.171:6789\/0"
}
]
}
}
7、導出ceph monitor資訊
[root@node3 ~]# ceph mon dump
<rados.Rados object at 0x10037cc7258>
dumped monmap epoch 1
epoch 1
fsid 99f00338-a334-4f90-a579-496a934f25c0
last_changed 2016-09-22 04:34:19.863934
created 2016-09-22 04:34:19.863934
0: 192.168.13.171:6789/0 mon.admin-node
8、檢查ceph monitor OSD 和PG狀态
[root@node3 ~]# ceph osd stat
<rados.Rados object at 0x10006f77258>
osdmap e277: 20 osds: 20 up, 20 in
[root@node3 ~]#
[root@node3 ~]# ceph pg stat
<rados.Rados object at 0x1002f4f7258>
v19994: 320 pgs: 320 active+clean; 2030 GB data, 658 GB used, 73826 GB / 74484 GB avail
9、列出ceph存儲池
[root@node3 ~]# ceph osd lspools
<rados.Rados object at 0x1000fbd7258>
10、列出叢集的認證密鑰
[root@node3 ~]# ceph auth list
<rados.Rados object at 0x10036697258>
installed auth entries:
osd.0
key: AQCcmONX78yyGBAARd1khxgyH4sWvQZ8MzOK3w==
caps: [mon] allow profile osd
caps: [osd] allow *
osd.1
key: AQA3h+RXLpxgNhAA66aGpLH3BwgCTuedzqEV3g==
osd.12
key: AQC9meNXRJrlIBAAuwj7upKKqLEt/L1x0Rfg1w==
osd.13
key: AQD4nONXCmlEExAAekslqtPJNkUuGdTbUrXzLg==
osd.14
key: AQCfneNXmGkEERAASfUPnf3BV/twV1Ny/Sr5Pg==
osd.15
key: AQCineNXd6l5LxAA7Oi5dIdVneuRK6vLuaL75g==
osd.16
key: AQClneNXPLdlNxAA1Sh9494cgCyOc9Kdu//GOg==
osd.17
key: AQCpneNXj0UnNhAAC7PwDYhbB/XX9EHQIB+HDQ==
osd.18
key: AQCsneNXyHthOhAAJCdCkMJDT6LT/76wgckD/Q==
osd.19
key: AQCwneNXlAjbDRAASTEjJLSKHTVM25GdZ0iWxg==
osd.2
key: AQBDh+RX8z5/KRAARehONU5MOVHvXK0A8NE4tw==
osd.20
key: AQCzneNX4B0fNRAAE9mptVaMXf0s5xNXWCCqsw==
osd.21
key: AQC2neNXbpWOOxAADvLSlnIroJTkfcVai9ZxBw==
osd.3
key: AQBPh+RXJTmxBBAAxhEkPfPPpDecn28z73D2wQ==
osd.4
key: AQBah+RXeZC8DhAARDrqMO1Wip9Qhq/CuHxCOQ==
osd.5
key: AQBlh+RXXfwgIRAAs65Qf1c5brueHPhpDMAv2w==
osd.6
key: AQBxh+RXOxDjMhAA0YoZvHQSx6frea6kwvzE7g==
osd.7
key: AQB/h+RX5o0WNxAAB68I7SY6Ek19fSElWdPA+A==
osd.8
key: AQCPh+RXqSrXKBAAxLt0IIOKzAkwe5+gPJg/Sw==
osd.9
key: AQCfh+RXSwc9FBAAt6cJoPICbo/RqKGhIuTKLw==
client.admin
key: AQBDl+NX6NHFERAAPIruW10XwUpHdUSighhfmg==
auid: 0
caps: [mds] allow
caps: [mon] allow *
本文轉自 OpenStack2015 部落格,原文連結: http://blog.51cto.com/andyliu/1856475 如需轉載請自行聯系原作者