gate1.corp13.un:~# cat /etc/haproxy/haproxy.cfg
...
# use_backend bk-node-ingress if { ssl_fc_sni keycloak.corp13.un }
...
gate1:~# curl https://keycloak.corp13.un/ #-f
gate1:~# while : ; do \
curl -f -d "client_id=any-client" -d "client_secret=anystring" -d "grant_type=password" \
-d "username=user1" -d 'password=kcpassword1' \
https://keycloak.corp13.un/realms/corp13/protocol/openid-connect/token &>/dev/null && \
echo "YES work `date`" || \
echo "NO work `date`"; \
sleep 60; done | tee -a work.log
!!! В классе, если нет второго кластера, .kube/config заменять не нужно
$ cp .kube/config_kube .kube/config
server.corp13.un:~/kubespray# pandoc -t plain docs/recover-control-plane.md | less
# в новых версиях kubespray путь к документу другой:
server.corp13.un:~/kubespray# pandoc -t plain docs/operations/recover-control-plane.md | less
...
- If your new control plane nodes have new ip addresses you may have
to change settings in various places.
...
server.corp13.un:~# ssh-copy-id kube3
server.corp13.un:~# cat /root/kubespray/inventory/mycluster/hosts.yaml
...
kube3:
ansible_host: 192.168.13.223
ip: 192.168.13.223
access_ip: 192.168.13.223
...
(venv1) server.corp13.un:~# ansible all -a 'sed -i"" -e "/swap/s/^/#/" /etc/fstab' -i /root/kubespray/inventory/mycluster/hosts.yaml --limit=kube3
(venv1) server.corp13.un:~# ansible-playbook conf/ansible/roles/nodes.yml -i /root/kubespray/inventory/mycluster/hosts.yaml -e "variable_host=all name_prefix=kube" --limit=kube3
(venv1) server.corp13.un:~/kubespray# cat inventory/mycluster/hosts.yaml
all:
hosts:
kube1:
ansible_host: 192.168.13.221
ip: 192.168.13.221
access_ip: 192.168.13.221
etcd_member_name: etcd1
kube2:
ansible_host: 192.168.13.222
ip: 192.168.13.222
access_ip: 192.168.13.222
etcd_member_name: etcd2
kube3:
ansible_host: 192.168.13.223
ip: 192.168.13.223
access_ip: 192.168.13.223
etcd_member_name: etcd3
kube4:
ansible_host: 192.168.13.224
ip: 192.168.13.224
access_ip: 192.168.13.224
children:
kube_control_plane:
hosts:
kube1:
kube2:
kube_node:
hosts:
kube1:
kube2:
kube4:
etcd:
hosts:
kube1:
kube2:
kube3:
broken_etcd:
hosts:
kube3:
broken_kube_control_plane:
hosts:
k8s_cluster:
children:
kube_control_plane:
kube_node:
calico_rr:
hosts: {}
(venv1) server.corp13.un:~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml --limit etcd,kube_control_plane -e etcd_retries=10 recover-control-plane.yml
... real 34m6.621s ...
Методические замечания для преподавателя при записи вебинара
(venv1) server.corp13.un:~/kubespray# cat inventory/mycluster/hosts.yaml
...
kube_node:
hosts:
kube1:
kube2:
kube3:
kube4:
...
# broken_etcd:
# hosts:
# kube3:
# broken_kube_control_plane:
# hosts:
...
(venv1) server.corp13.un:~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml --limit=kube3 scale.yml
... real 13m57.709s ...
kube3:~# apt install open-iscsi
~/kubespray# pandoc -t plain docs/upgrades.md | less
# в новых версиях kubespray путь к документу другой:
~/kubespray# pandoc -t plain docs/operations/upgrades.md | less
... Multiple upgrades ...
~/kubespray# git describe --tags
~/kubespray# git tag | more
~/kubespray# git fetch --all
~/kubespray# git checkout v2.23.3
~/kubespray# cp -rvfpT inventory/sample inventory/mycluster
(venv1) ~/kubespray# pip3 install -r requirements.txt
kube1:~# kubectl get nodes
NAME    STATUS   ROLES           AGE     VERSION
kube1   Ready    control-plane   183d    v1.26.11
kube2   Ready    control-plane   183d    v1.26.11
kube3   Ready    <none>          4h44m   v1.26.11
kube4   Ready    <none>          165d    v1.26.11
(venv1) ~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml upgrade-cluster.yml
... real 94m58.471s ...
kube1:~# kubectl get nodes
NAME    STATUS   ROLES           AGE    VERSION
kube1   Ready    control-plane   183d   v1.27.7
kube2   Ready    control-plane   183d   v1.27.7
kube3   Ready    <none>          13h    v1.27.7
kube4   Ready    <none>          165d   v1.27.7
(venv2) server.corp13.un:~/kubespray# git describe --tags
v2.26.0
kube1:~# kubectl get nodes
NAME    STATUS   ROLES           AGE    VERSION
kube1   Ready    control-plane   188d   v1.30.4
kube2   Ready    control-plane   188d   v1.30.4
kube3   Ready    <none>          44h    v1.30.4
kube4   Ready    <none>          170d   v1.30.4
Методические замечания для преподавателя при записи вебинара
kubeN# cat /etc/debian_version
kubeN# uname -a
kubeN# time (apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y upgrade)
... real 4m39.581s ...
kubeN# dpkg --configure -a
kubeN# sed -i -e 's/bullseye/bookworm/g' /etc/apt/sources.list
kubeN# time (apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y upgrade)
... real 5m55.980s ...
kubeN# cat /etc/debian_version
kubeN# time DEBIAN_FRONTEND=noninteractive apt -y full-upgrade
... real 5m53.970s ...
kubeN# init 6
kubeN# uname -a
kubeN# apt autoremove
kubeN# journalctl --vacuum-time=1h
kubeN# systemctl disable rsyslog.service; apt purge rsyslog -y; rm -v /var/log/syslog* /var/log/messages*
kubeN# apt purge perl -y && apt autoremove -y
kubeN# find /tmp -type f -size +10M | xargs rm
(venv1) server.corp13.un:~/kubespray# ansible all -m community.general.shutdown -i /root/kubespray/inventory/mycluster/hosts.yaml