This is an old revision of the document!
nodeN# apt install ceph
nodeN# cat /etc/ceph/ceph.conf
[global]
fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
mon initial members = node3,node4,node5
mon host = 192.168.X.3,192.168.X.4,192.168.X.5
public network = 192.168.X.0/24
auth cluster required = none
auth service required = none
auth client required = none
scp /etc/ceph/ceph.conf node4:/etc/ceph/
scp /etc/ceph/ceph.conf node5:/etc/ceph/
monmaptool --create --add node3 192.168.X.3 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
monmaptool --add node4 192.168.X.4 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
monmaptool --add node5 192.168.X.5 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
monmaptool --print /tmp/monmap
scp /tmp/monmap node4:/tmp/
scp /tmp/monmap node5:/tmp/
node3# bash -c '
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node3
ssh node4 sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node4
ssh node5 sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node5
'
node3# bash -c '
sudo -u ceph ceph-mon --mkfs -i node3 --monmap /tmp/monmap
ssh node4 sudo -u ceph ceph-mon --mkfs -i node4 --monmap /tmp/monmap
ssh node5 sudo -u ceph ceph-mon --mkfs -i node5 --monmap /tmp/monmap
'
node3# ls -l /var/lib/ceph/mon/ceph-node3
node3# bash -c '
systemctl start ceph-mon@node3
systemctl enable ceph-mon@node3
ssh node4 systemctl start ceph-mon@node4
ssh node4 systemctl enable ceph-mon@node4
ssh node5 systemctl start ceph-mon@node5
ssh node5 systemctl enable ceph-mon@node5
'
debian11/ubuntu# ceph mon enable-msgr2
debian11/ubuntu# ceph config set mon mon_warn_on_insecure_global_id_reclaim false
debian11/ubuntu# ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false
# ceph -s
# ps ax | grep ceph
# ceph health detail
MON_DOWN 1/3 mons down, quorum node4,node5
    mon.node3 (rank 0) addr 192.168.X.3:6789/0 is down (out of quorum)
node3# bash -c '
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node3
systemctl start ceph-mgr@node3
systemctl enable ceph-mgr@node3
ssh node4 sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node4
ssh node4 systemctl start ceph-mgr@node4
ssh node4 systemctl enable ceph-mgr@node4
ssh node5 sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node5
ssh node5 systemctl start ceph-mgr@node5
ssh node5 systemctl enable ceph-mgr@node5
'
# ceph -s
# ps ax | grep ceph
# lsblk
node3# bash -c '
ceph-volume lvm create --data /dev/sdb
ssh node4 ceph-volume lvm create --data /dev/sdb
ssh node5 ceph-volume lvm create --data /dev/sdb
'
# vgs
# ps ax | grep ceph
# ceph -s
# ceph osd stat
# ceph osd tree
# ceph osd df
node3# scp /etc/ceph/ceph.conf node6:/etc/ceph/
node3# ssh node6 ceph-volume lvm create --data /dev/sdb
node3# watch ceph -s
ceph osd safe-to-destroy osd.0
OSD(s) 0 are safe to destroy without reducing data durability.
ceph osd destroy 0 --yes-i-really-mean-it
ceph osd purge 0 --yes-i-really-mean-it
ceph osd crush reweight osd.2 0
ceph osd crush reweight osd.2 0.00389
node3# ceph osd df
ID CLASS WEIGHT  REWEIGHT SIZE    USE     AVAIL   %USE  VAR  PGS
 0   hdd 0       0.83000  4.00GiB 1.01GiB 2.99GiB 25.30 0.37   0
 1   hdd 0.00389 0.83000  4.00GiB 2.78GiB 1.21GiB 69.66 1.02  74
 2   hdd 0.00389 0.83000  4.00GiB 3.04GiB  980MiB 76.04 1.11  81
 3   hdd 0.00389 1.00000  4.00GiB 3.43GiB  575MiB 85.96 1.26  99
 4   hdd 0.00389 1.00000  4.00GiB 3.38GiB  631MiB 84.58 1.24 102
ceph osd reweight osd.2 1
Total PGs = OSDs * 100 / pool_size
5 * 100 / 3 = 166.6...
# ceph osd pool create test-pool1 180
debian11/ubuntu# ceph osd pool create test-pool1 128
or
debian11/ubuntu# ceph config set global mon_warn_on_pool_pg_num_not_power_of_two false
# ceph -s
... pgs: 180 active+clean
# ceph pg dump
# ceph osd lspools
ceph osd pool set test-pool1 pg_num 250
ceph osd pool set test-pool1 pgp_num 250
ceph osd pool get test-pool1 size
ceph osd pool get test-pool1 min_size
ceph osd pool set test-pool1 size 2
ceph osd pool set test-pool1 size 3
# ceph df
# rbd pool init test-pool1
# rbd create -p test-pool1 rbd1 --size 1G
# rbd list test-pool1
# rbd info test-pool1/rbd1
# rbd resize --size 3584M test-pool1/rbd1
node3# bash -c '
systemctl restart tgt
ssh node4 systemctl restart tgt
ssh node5 systemctl restart tgt
'
nodeN# apt install tgt-rbd nodeN# cat /etc/tgt/conf.d/ceph.conf
<target virtual-ceph-test-pool1-rbd1:iscsi>
    driver iscsi
    bs-type rbd
    backing-store test-pool1/rbd1
    initiator-address ALL
</target>
scp /etc/tgt/conf.d/ceph.conf node4:/etc/tgt/conf.d/
scp /etc/tgt/conf.d/ceph.conf node5:/etc/tgt/conf.d/
node3# bash -c '
systemctl restart tgt
ssh node4 systemctl restart tgt
ssh node5 systemctl restart tgt
'
systemctl status tgt

=== https://ceph.io/planet/find-the-osd-location/
# ceph osd find 0
ceph health detail
===

=== https://medium.com/flant-com/3-cases-from-our-ceph-experience-c46efea0e527
Case #1. Gracefully taking out OSDs from the Ceph cluster
...
ceph tell 'osd.*' injectargs --osd-max-backfills=1
...