
Ceph Solution

Deployment

Installation

nodeN# apt install ceph
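
A quick sanity check that the package landed on every node (nodeN stands for each cluster node, as above):

nodeN# ceph --version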

MON

Initial configuration

nodeN# cat /etc/ceph/ceph.conf
[global]
	fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
	mon initial members = node3,node4,node5
	mon host = 192.168.X.3,192.168.X.4,192.168.X.5
	public network = 192.168.X.0/24
	auth cluster required = none
	auth service required = none
	auth client required = none
scp /etc/ceph/ceph.conf node4:/etc/ceph/
scp /etc/ceph/ceph.conf node5:/etc/ceph/
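
The fsid above is only an example value; a new cluster would normally get its own UUID written into ceph.conf before the file is copied out, e.g.:

node3# uuidgen
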
monmaptool --create --add node3 192.168.X.3 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
monmaptool --add node4 192.168.X.4 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
monmaptool --add node5 192.168.X.5 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
monmaptool --print /tmp/monmap
scp /tmp/monmap node4:/tmp/
scp /tmp/monmap node5:/tmp/
node3# bash -c '
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node3
ssh node4 sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node4
ssh node5 sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node5
'
node3# bash -c '
sudo -u ceph ceph-mon --mkfs -i node3 --monmap /tmp/monmap
ssh node4 sudo -u ceph ceph-mon --mkfs -i node4 --monmap /tmp/monmap
ssh node5 sudo -u ceph ceph-mon --mkfs -i node5 --monmap /tmp/monmap
'
node3# ls -l /var/lib/ceph/mon/ceph-node3
node3# bash -c '
systemctl start ceph-mon@node3
systemctl enable ceph-mon@node3
ssh node4 systemctl start ceph-mon@node4
ssh node4 systemctl enable ceph-mon@node4
ssh node5 systemctl start ceph-mon@node5
ssh node5 systemctl enable ceph-mon@node5
'
# ceph -s

# ps ax | grep ceph
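
To confirm that all three monitors actually formed a quorum (not just that the processes are running), the quorum status can be queried directly; a minimal check, reusing the node names above:

node3# ceph quorum_status --format json-pretty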

Finding problem nodes

# ceph health detail
MON_DOWN 1/3 mons down, quorum node4,node5
    mon.node3 (rank 0) addr 192.168.X.3:6789/0 is down (out of quorum)
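
If the monitor on node3 has simply stopped, restarting its unit is usually enough to bring it back into the quorum; a sketch, reusing the unit names from above:

node3# systemctl restart ceph-mon@node3
node3# ceph -s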

MGR

node3# bash -c '
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node3
systemctl start ceph-mgr@node3
systemctl enable ceph-mgr@node3

ssh node4 sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node4
ssh node4 systemctl start ceph-mgr@node4
ssh node4 systemctl enable ceph-mgr@node4

ssh node5 sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node5
ssh node5 systemctl start ceph-mgr@node5
ssh node5 systemctl enable ceph-mgr@node5
'
# ceph -s

# ps ax | grep ceph
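
Besides ceph -s, the active/standby manager state and the enabled modules can be inspected directly:

# ceph mgr dump
# ceph mgr module ls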

OSD

Initial configuration

# lsblk
node3# bash -c '
ceph-volume lvm create --data /dev/sdb
ssh node4 ceph-volume lvm create --data /dev/sdb
ssh node5 ceph-volume lvm create --data /dev/sdb
'
# vgs

# ps ax | grep ceph

# ceph -s

# ceph osd stat

# ceph osd tree

# ceph osd df
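
To see which LVM volume and physical disk back each OSD on a given node, ceph-volume can list its inventory; run it on the node in question:

node3# ceph-volume lvm list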

Adding an OSD node

node3# scp /etc/ceph/ceph.conf node6:/etc/ceph/

node3# ssh node6 ceph-volume lvm create --data /dev/sdb

node3# watch ceph -s
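
Once the rebalance settles, the new host and its OSD should appear in the CRUSH tree; a quick check:

node3# ceph osd tree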

Removing an OSD node

ceph osd safe-to-destroy osd.0
OSD(s) 0 are safe to destroy without reducing data durability.

ceph osd destroy 0 --yes-i-really-mean-it
ceph osd purge 0 --yes-i-really-mean-it
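
destroy/purge only removes the OSD from the cluster map; on a live cluster the OSD is normally drained and stopped first. A sketch of the fuller sequence, assuming osd.0 runs on node3:

node3# ceph osd out 0
node3# watch ceph -s
node3# systemctl stop ceph-osd@0
node3# ceph osd safe-to-destroy osd.0
node3# ceph osd purge 0 --yes-i-really-mean-it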

Managing OSD node usage priority

ceph osd crush reweight osd.2 0
ceph osd crush reweight osd.2 0.00389

node3# ceph osd df
ID CLASS WEIGHT  REWEIGHT SIZE    USE     AVAIL   %USE  VAR  PGS
 0   hdd       0  0.83000 4.00GiB 1.01GiB 2.99GiB 25.30 0.37   0
 1   hdd 0.00389  0.83000 4.00GiB 2.78GiB 1.21GiB 69.66 1.02  74
 2   hdd 0.00389  0.83000 4.00GiB 3.04GiB  980MiB 76.04 1.11  81
 3   hdd 0.00389  1.00000 4.00GiB 3.43GiB  575MiB 85.96 1.26  99
 4   hdd 0.00389  1.00000 4.00GiB 3.38GiB  631MiB 84.58 1.24 102

ceph osd reweight osd.2 1
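
Note that crush reweight changes the persistent CRUSH weight (roughly the OSD's share of capacity, the WEIGHT column above), while reweight sets a temporary 0..1 override (the REWEIGHT column). The CRUSH weights can also be inspected in the hierarchy:

node3# ceph osd crush tree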

OSD POOL

Calculating parameters and creating a POOL

Total PGs = OSDs * 100 / pool_size

5*100/3 = 166.6...
# ceph osd pool create test-pool1 180
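
The same arithmetic as a shell one-liner (100 target PGs per OSD and a pool size of 3 are the assumptions used above; integer division drops the fraction):

# echo $((5 * 100 / 3))
166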

# ceph -s
...
    pgs:     180 active+clean
# ceph pg dump

# ceph osd lspools

Changing POOL parameters

ceph osd pool set test-pool1 pg_num 250
ceph osd pool set test-pool1 pgp_num 250

ceph osd pool get test-pool1 size
ceph osd pool get test-pool1 min_size

ceph osd pool set test-pool1 size 2
ceph osd pool set test-pool1 size 3
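
The resulting pool parameters (pg_num, size, min_size and so on) can be reviewed in one place:

# ceph osd pool ls detail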

RBD POOL

# ceph df

# rbd pool init test-pool1

# rbd create -p test-pool1 rbd1 --size 1G

# rbd list test-pool1

# rbd info test-pool1/rbd1
# rbd resize --size 3584M test-pool1/rbd1

node3# bash -c '
systemctl restart tgt
ssh node4 systemctl restart tgt
ssh node5 systemctl restart tgt
'
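
Apart from exporting the image over iSCSI (next section), it can be checked locally with the kernel RBD client; a minimal sketch, assuming the rbd kernel module is available on node3:

node3# rbd map test-pool1/rbd1
node3# rbd showmapped
node3# rbd unmap test-pool1/rbd1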

TGT-RBD

nodeN# apt install tgt-rbd

nodeN# cat /etc/tgt/conf.d/ceph.conf
<target virtual-ceph-test-pool1-rbd1:iscsi>
    driver iscsi
    bs-type rbd
    backing-store test-pool1/rbd1
    initiator-address ALL
</target>
scp /etc/tgt/conf.d/ceph.conf node4:/etc/tgt/conf.d/
scp /etc/tgt/conf.d/ceph.conf node5:/etc/tgt/conf.d/
node3# bash -c '
systemctl restart tgt
ssh node4 systemctl restart tgt
ssh node5 systemctl restart tgt
'
systemctl status tgt
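
From the client side the exported LUN can be reached with a standard iSCSI initiator; a sketch, assuming open-iscsi on a separate client host and the node addresses used above:

client# apt install open-iscsi
client# iscsiadm -m discovery -t st -p 192.168.X.3
client# iscsiadm -m node -T virtual-ceph-test-pool1-rbd1:iscsi -p 192.168.X.3 --login
client# lsblk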

===
https://ceph.io/planet/find-the-osd-location/

# ceph osd find 0
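
ceph osd find reports the host and network address of the OSD; additional details (hostname, backing devices, version) are available as metadata:

# ceph osd metadata 0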


ceph health detail


===
https://medium.com/flant-com/3-cases-from-our-ceph-experience-c46efea0e527
Case #1. Gracefully taking out OSDs from the Ceph cluster
...
ceph tell 'osd.*' injectargs --osd-max-backfills=1
...
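
The current value can be read back through the OSD's admin socket on the node where that OSD runs (osd.0 is just an example id):

node3# ceph daemon osd.0 config get osd_max_backfills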