This shows you the differences between two versions of the page.
Both sides previous revision Previous revision Next revision | Previous revision | ||
решение_ceph [2021/06/17 09:43] admin [OSD] |
решение_ceph [2024/09/07 14:42] (current) val [Решение Ceph] |
||
---|---|---|---|
Line 8: | Line 8: | ||
* [[http://onreader.mdl.ru/LearningCeph/content/index.html|Практическое руководство по разработке, реализации и управлению вашей управляемой программным обеспечением, массивно масштабируемой системы хранения Ceph]] | * [[http://onreader.mdl.ru/LearningCeph/content/index.html|Практическое руководство по разработке, реализации и управлению вашей управляемой программным обеспечением, массивно масштабируемой системы хранения Ceph]] | ||
+ | * [[http://onreader.mdl.ru/MasteringCeph.2ed/content/index.html|Полное руководство Ceph, 2-е изд.]] | ||
+ | * [[http://onreader.mdl.ru/CephCookbook/content/index.html|Книга рецептов Ceph]] | ||
===== Развертывание ===== | ===== Развертывание ===== | ||
* [[https://docs.ceph.com/en/latest/install/manual-deployment/|MANUAL DEPLOYMENT]] | * [[https://docs.ceph.com/en/latest/install/manual-deployment/|MANUAL DEPLOYMENT]] | ||
+ | * [[https://www.server-world.info/en/note?os=Debian_11&p=ceph14&f=1|Debian 11 : Ceph Nautilus]] | ||
==== Установка ==== | ==== Установка ==== | ||
Line 19: | Line 22: | ||
==== MON ==== | ==== MON ==== | ||
+ | |||
+ | * [[https://docs.ceph.com/en/latest/man/8/ceph-mon/|CEPH-MON – CEPH MONITOR DAEMON]] | ||
=== Начальная конфигурация === | === Начальная конфигурация === | ||
Line 45: | Line 50: | ||
scp /tmp/monmap node5:/tmp/ | scp /tmp/monmap node5:/tmp/ | ||
</code><code> | </code><code> | ||
- | bash -c ' | + | node3# bash -c ' |
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node3 | sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node3 | ||
ssh node4 sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node4 | ssh node4 sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node4 | ||
Line 51: | Line 56: | ||
' | ' | ||
</code><code> | </code><code> | ||
- | bash -c ' | + | node3# bash -c ' |
sudo -u ceph ceph-mon --mkfs -i node3 --monmap /tmp/monmap | sudo -u ceph ceph-mon --mkfs -i node3 --monmap /tmp/monmap | ||
ssh node4 sudo -u ceph ceph-mon --mkfs -i node4 --monmap /tmp/monmap | ssh node4 sudo -u ceph ceph-mon --mkfs -i node4 --monmap /tmp/monmap | ||
Line 57: | Line 62: | ||
' | ' | ||
</code><code> | </code><code> | ||
- | ls -l /var/lib/ceph/mon/ceph-node3 | + | node3# ls -l /var/lib/ceph/mon/ceph-node3 |
</code><code> | </code><code> | ||
- | bash -c ' | + | node3# bash -c ' |
systemctl start ceph-mon@node3 | systemctl start ceph-mon@node3 | ||
systemctl enable ceph-mon@node3 | systemctl enable ceph-mon@node3 | ||
Line 67: | Line 72: | ||
ssh node5 systemctl enable ceph-mon@node5 | ssh node5 systemctl enable ceph-mon@node5 | ||
' | ' | ||
+ | </code> | ||
+ | |||
+ | * [[https://www.suse.com/support/kb/doc/?id=000019960|Cluster status shows a "mons are allowing insecure global_id reclaim" health warning]] | ||
+ | |||
+ | <code> | ||
+ | debian11_12/ubuntu# ceph mon enable-msgr2 | ||
+ | debian11_12/ubuntu# ceph config set mon mon_warn_on_insecure_global_id_reclaim false | ||
+ | debian11_12/ubuntu# ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false | ||
</code><code> | </code><code> | ||
# ceph -s | # ceph -s | ||
Line 81: | Line 94: | ||
==== MGR ==== | ==== MGR ==== | ||
+ | |||
+ | * [[https://docs.ceph.com/en/latest/mgr/index.html|CEPH MANAGER DAEMON]] | ||
<code> | <code> | ||
- | bash -c ' | + | node3# bash -c ' |
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node3 | sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-node3 | ||
systemctl start ceph-mgr@node3 | systemctl start ceph-mgr@node3 | ||
Line 103: | Line 118: | ||
==== OSD ==== | ==== OSD ==== | ||
+ | |||
+ | * [[https://docs.ceph.com/en/latest/man/8/ceph-osd/|CEPH-OSD – CEPH OBJECT STORAGE DAEMON]] | ||
=== Начальная конфигурация === | === Начальная конфигурация === | ||
Line 108: | Line 125: | ||
# lsblk | # lsblk | ||
</code><code> | </code><code> | ||
- | bash -c ' | + | node3# bash -c ' |
ceph-volume lvm create --data /dev/sdb | ceph-volume lvm create --data /dev/sdb | ||
ssh node4 ceph-volume lvm create --data /dev/sdb | ssh node4 ceph-volume lvm create --data /dev/sdb | ||
Line 166: | Line 183: | ||
==== OSD POOL ==== | ==== OSD POOL ==== | ||
+ | |||
+ | * [[https://docs.ceph.com/en/latest/rados/operations/pools/|POOLS]] | ||
=== Расчет параметров и создание POOL === | === Расчет параметров и создание POOL === | ||
+ | |||
+ | * [[https://docs.ceph.com/en/latest/rados/operations/placement-groups/|PLACEMENT GROUPS]] | ||
+ | |||
<code> | <code> | ||
- | Total PGs = (Total_number_of_OSD * 100) / max_replication_count | + | Total PGs = OSDs * 100 / pool_size |
5*100/3 = 166,6... | 5*100/3 = 166,6... | ||
+ | </code><code> | ||
+ | # ceph osd pool create test-pool1 180 | ||
- | ceph osd pool create test-pool1 180 | + | debian11_12/ubuntu# ceph osd pool create test-pool1 128 |
+ | или | ||
+ | debian11_12/ubuntu# ceph config set global mon_warn_on_pool_pg_num_not_power_of_two false | ||
- | ceph -s | + | # ceph -s |
+ | </code><code> | ||
... | ... | ||
pgs: 180 active+clean | pgs: 180 active+clean | ||
+ | </code><code> | ||
+ | # ceph pg dump | ||
- | ceph pg dump | + | # ceph osd lspools |
- | + | ||
- | ceph osd lspools | + | |
</code> | </code> | ||
=== Изменение параметров POOL === | === Изменение параметров POOL === | ||
Line 197: | Line 224: | ||
==== RBD POOL ==== | ==== RBD POOL ==== | ||
+ | * Reliable Autonomic Distributed Object Store = RADOS | ||
* [[https://docs.ceph.com/en/latest/rbd/rados-rbd-cmds/|BASIC BLOCK DEVICE COMMANDS]] | * [[https://docs.ceph.com/en/latest/rbd/rados-rbd-cmds/|BASIC BLOCK DEVICE COMMANDS]] | ||
+ | * [[https://docs.ceph.com/en/latest/man/8/rbd/|rbd – manage rados block device (RBD) images]] | ||
<code> | <code> | ||
- | ceph df | + | # ceph df |
- | rbd pool init test-pool1 | + | # rbd pool init test-pool1 |
- | rbd create -p test-pool1 rbd1 --size 1024 | + | |
- | rbd list test-pool1 | + | # rbd create -p test-pool1 rbd1 --size 1G |
- | rbd info test-pool1/rbd1 | + | |
+ | # rbd list test-pool1 | ||
+ | |||
+ | # rbd info test-pool1/rbd1 | ||
</code><code> | </code><code> | ||
- | rbd resize --size 3G test-pool1/rbd1 | + | # rbd resize --size 3584M test-pool1/rbd1 |
- | rbd resize --size 3584M test-pool1/rbd1 | + | |
- | nodeN# systemctl restart tgt | + | node3# bash -c ' |
+ | systemctl restart tgt | ||
+ | ssh node4 systemctl restart tgt | ||
+ | ssh node5 systemctl restart tgt | ||
+ | ' | ||
</code> | </code> | ||
Line 232: | Line 266: | ||
scp /etc/tgt/conf.d/ceph.conf node5:/etc/tgt/conf.d/ | scp /etc/tgt/conf.d/ceph.conf node5:/etc/tgt/conf.d/ | ||
</code><code> | </code><code> | ||
- | bash -c ' | + | node3# bash -c ' |
systemctl restart tgt | systemctl restart tgt | ||
ssh node4 systemctl restart tgt | ssh node4 systemctl restart tgt |