The Kubernetes system

The kubectl command-line tool

Installation

Linux

# curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl

# chmod +x kubectl

# mv kubectl /usr/local/bin/

Windows

cmder$ curl -LO "https://dl.k8s.io/release/v1.29.0/bin/windows/amd64/kubectl.exe"

cmder$ mv kubectl.exe /usr/bin

Connecting to the cluster

mkdir ~/.kube/

scp root@192.168.X.2N1:.kube/config ~/.kube/

cat ~/.kube/config
...
    server: https://192.168.X.2N1:6443
...
kubectl get all -o wide --all-namespaces
kubectl get all -o wide -A

Setting up shell completion

gitlab-runner@server:~$ source <(kubectl completion bash)
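
To make completion permanent for the user, the same line can be appended to the shell profile (a minimal sketch, assuming bash):

gitlab-runner@server:~$ echo 'source <(kubectl completion bash)' >> ~/.bashrc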

Connecting to another cluster

gitlab-runner@server:~$ scp root@kube1:.kube/config .kube/config_kube1

gitlab-runner@server:~$ cat .kube/config_kube1
...
    .kube/config_kube1
...
gitlab-runner@server:~$ export KUBECONFIG=~/.kube/config_kube1

gitlab-runner@server:~$ kubectl get nodes
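
Instead of switching KUBECONFIG each time, both files can be merged into one view; the context names come from the config files themselves (a sketch, not cluster-specific):

gitlab-runner@server:~$ export KUBECONFIG=~/.kube/config:~/.kube/config_kube1

gitlab-runner@server:~$ kubectl config get-contexts

gitlab-runner@server:~$ kubectl config use-context <context-name>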

Installing minikube

root@server:~# apt install -y curl wget apt-transport-https

root@server:~# wget https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64

root@server:~# mv minikube-linux-amd64 /usr/local/bin/minikube

root@server:~# chmod +x /usr/local/bin/minikube
gitlab-runner@server:~$ ### minikube delete
gitlab-runner@server:~$ ### rm -rv .minikube/

gitlab-runner@server:~$ time minikube start --driver=docker --insecure-registry "server.corpX.un:5000"
real    29m8.320s
...

gitlab-runner@server:~$ minikube status

gitlab-runner@server:~$ minikube ip

gitlab-runner@server:~$ minikube addons list

gitlab-runner@server:~$ minikube addons configure registry-creds   # Not needed for registries with public projects
...
Do you want to enable Docker Registry? [y/n]: y
-- Enter docker registry server url: http://server.corpX.un:5000
-- Enter docker registry username: student
-- Enter docker registry password:
...

gitlab-runner@server:~$ minikube addons enable registry-creds

gitlab-runner@server:~$ minikube kubectl -- get pods -A

gitlab-runner@server:~$ alias kubectl='minikube kubectl --'

gitlab-runner@server:~$ kubectl get pods -A

or

gitlab-runner@server:~$ ###minikube stop

gitlab-runner@server:~$ ###minikube start

Kubernetes cluster

Deployment with kubeadm

Preparing the nodes

node1# ssh-keygen

node1# ssh-copy-id node2
node1# ssh-copy-id node3

node1# bash -c '
swapoff -a
ssh node2 swapoff -a
ssh node3 swapoff -a
'

node1# bash -c '
sed -i"" -e "/swap/s/^/#/" /etc/fstab
ssh node2 sed -i"" -e "/swap/s/^/#/" /etc/fstab
ssh node3 sed -i"" -e "/swap/s/^/#/" /etc/fstab
'

Installing the software

!!! Consult your instructor !!!

Installing and configuring the CRI
node1_2_3# apt-get install -y docker.io

Check the current cgroup driver setting; if:
containerd config dump | grep SystemdCgroup
            SystemdCgroup = false
then run the following four commands:

bash -c 'mkdir -p /etc/containerd/
ssh node2 mkdir -p /etc/containerd/
ssh node3 mkdir -p /etc/containerd/
'
bash -c 'containerd config default > /etc/containerd/config.toml
ssh node2 "containerd config default > /etc/containerd/config.toml"
ssh node3 "containerd config default > /etc/containerd/config.toml"
'
bash -c 'sed -i "s/SystemdCgroup \= false/SystemdCgroup \= true/g" /etc/containerd/config.toml
ssh node2 sed -i \"s/SystemdCgroup \= false/SystemdCgroup \= true/g\" /etc/containerd/config.toml
ssh node3 sed -i \"s/SystemdCgroup \= false/SystemdCgroup \= true/g\" /etc/containerd/config.toml
'
bash -c 'service containerd restart
ssh node2 service containerd restart
ssh node3 service containerd restart
'
Add the repository and install the packages
bash -c 'mkdir -p /etc/apt/keyrings
ssh node2 mkdir -p /etc/apt/keyrings
ssh node3 mkdir -p /etc/apt/keyrings
'

bash -c 'curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
ssh node2 "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg"
ssh node3 "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg"
'

bash -c 'echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list
ssh node2 echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /" \| tee /etc/apt/sources.list.d/kubernetes.list
ssh node3 echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /" \| tee /etc/apt/sources.list.d/kubernetes.list
'

bash -c 'apt-get update && apt-get install -y kubelet kubeadm kubectl
ssh node2 "apt-get update && apt-get install -y kubelet kubeadm kubectl"
ssh node3 "apt-get update && apt-get install -y kubelet kubeadm kubectl"
'

Initializing the master

root@node1:~# kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.X.201

root@node1:~# mkdir -p $HOME/.kube

root@node1:~# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

root@node1:~# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

root@node1:~# kubectl get pod -o wide --all-namespaces

root@node1:~# kubectl get --raw='/readyz?verbose'
node1# bash -c '
rm /etc/containerd/config.toml
systemctl restart containerd
ssh node2 rm /etc/containerd/config.toml
ssh node2 systemctl restart containerd
ssh node3 rm /etc/containerd/config.toml
ssh node3 systemctl restart containerd
'

Joining worker nodes

root@node2_3:~# curl -k https://node1:6443/livez?verbose
root@node2_3:~# kubeadm join 192.168.X.201:6443 --token NNNNNNNNNNNNNNNNNNNN \
        --discovery-token-ca-cert-hash sha256:NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN

root@node2_3:~# curl -sSL http://127.0.0.1:10248/healthz

root@node1:~# kubeadm token list

root@node1:~# kubeadm token create --print-join-command
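
If only the token is known, the --discovery-token-ca-cert-hash value can be recomputed from the cluster CA certificate on the master (the standard OpenSSL pipeline from the kubeadm documentation):

root@node1:~# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'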

Checking cluster state

root@node1:~# kubectl cluster-info

root@node1:~# kubectl get nodes -o wide

root@node1:~# kubectl describe node node2

Removing a node

$ kubectl cordon kube3

$ time kubectl drain kube3 #--ignore-daemonsets --delete-emptydir-data --force

$ kubectl delete node kube3
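
If the node was drained only for maintenance and is not actually being removed, it can be returned to scheduling instead of deleted:

$ kubectl uncordon kube3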

Tearing down the cluster

node1# bash -c '
kubeadm reset
ssh node2 kubeadm reset
ssh node3 kubeadm reset
'

Configuring access to an insecure private registry

!!! Consult your instructor !!!

containerd
root@node1:~# mkdir -p /etc/containerd/

root@node1:~# cat /etc/containerd/config.toml
...
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
     [plugins."io.containerd.grpc.v1.cri".registry.mirrors."server.corpX.un:5000"]
      endpoint = ["http://server.corpX.un:5000"]
...
# no need
#  [plugins."io.containerd.grpc.v1.cri".registry.configs]
#    [plugins."io.containerd.grpc.v1.cri".registry.configs."server.corpX.un:5000".tls]
#      insecure_skip_verify = true

# doesn't work in cri-tools 1.25; requires a public project
#[plugins."io.containerd.grpc.v1.cri".registry.configs."server.corpX.un:5000".auth]
#      auth = "c3R1ZGVudDpwYXNzd29yZA=="
node1# bash -c '
ssh node2 mkdir -p /etc/containerd/
ssh node3 mkdir -p /etc/containerd/
scp /etc/containerd/config.toml node2:/etc/containerd/config.toml
scp /etc/containerd/config.toml node3:/etc/containerd/config.toml
systemctl restart containerd
ssh node2 systemctl restart containerd
ssh node3 systemctl restart containerd
'

root@nodeN:~# containerd config dump | less

Verification

root@nodeN:~# crictl -r unix:///run/containerd/containerd.sock pull server.corpX.un:5000/student/gowebd
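
For a private project, the same pull can be tested with explicit credentials (crictl supports --creds; the student:password pair below is an assumption matching the course registry):

root@nodeN:~# crictl -r unix:///run/containerd/containerd.sock pull --creds student:password server.corpX.un:5000/student/webd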

Deployment with Kubespray

!!! Consult your instructor !!!

kube1# ssh-keygen

kube1# ssh-copy-id kube1;ssh-copy-id kube2;ssh-copy-id kube3;ssh-copy-id kube4;

kube1# #apt update

kube1# #apt install python3-pip -y

kube1# git clone https://github.com/kubernetes-sigs/kubespray

kube1# cd kubespray/

~/kubespray# grep -r containerd_insecure_registries .
~/kubespray# git log

~/kubespray# git branch -r
~/kubespray# git checkout origin/release-2.22

~/kubespray# git tag -l
~/kubespray# ### git checkout tags/v2.22.1

~/kubespray# ### git checkout 4c37399c7582ea2bfb5202c3dde3223f9c43bf59

~/kubespray# ### git checkout master
~/kubespray# time pip3 install -r requirements.txt
real    1m48.202s

~/kubespray# cp -rvfpT inventory/sample inventory/mycluster

~/kubespray# declare -a IPS=(kube1,192.168.X.221 kube2,192.168.X.222 kube3,192.168.X.223)

~/kubespray# CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}

~/kubespray# less inventory/mycluster/hosts.yaml
~/kubespray# cat inventory/mycluster/group_vars/all/docker.yml
...
docker_registry_mirrors:
  - https://mirror.gcr.io
...
~/kubespray# cat inventory/mycluster/group_vars/all/containerd.yml
...
containerd_registries_mirrors:
  - prefix: docker.io
    mirrors:
    - host: https://mirror.gcr.io
      capabilities: ["pull", "resolve"]
      skip_verify: false
...
~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml
real    45m31.796s

kube1# less ~/.kube/config

~/kubespray# ###time ansible-playbook -i inventory/mycluster/hosts.yaml reset.yml
real    7m31.796s

Adding a node with Kubespray

~/kubespray# cat inventory/mycluster/hosts.yaml
...
    node4:
      ansible_host: 192.168.X.204
      ip: 192.168.X.204
      access_ip: 192.168.X.204
...
    kube_node:
...
        node4:
...
~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml --limit=kube4 scale.yml
real    17m37.459s

$ kubectl get nodes -o wide

Adding insecure_registries with Kubespray

~/kubespray# cat inventory/mycluster/group_vars/all/containerd.yml
...
containerd_insecure_registries:
  "server.corpX.un:5000": "http://server.corpX.un:5000"
containerd_registry_auth:
  - registry: server.corpX.un:5000
    username: student
    password: Pa$$w0rd
...
~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml
real    46m37.151s

# less /etc/containerd/config.toml

Managing add-ons with Kubespray

~/kubespray# cat inventory/mycluster/group_vars/k8s_cluster/addons.yml
...
helm_enabled: true
...
ingress_nginx_enabled: true
ingress_nginx_host_network: true
...

Basic k8s objects

Deployments, ReplicaSets, Pods

$ kubectl api-resources

$ kubectl run my-debian --image=debian -- "sleep" "3600"

$ ###kubectl run -ti --rm my-debian --image=debian --overrides='{"spec": { "nodeSelector": {"kubernetes.io/hostname": "kube4"}}}'

$ kubectl get pods

kubeN# crictl ps | grep debi
kubeN# crictl images
nodeN# ctr ns ls
nodeN# ctr -n=k8s.io image ls | grep debi

$ kubectl delete pod my-debian
$ ###kubectl delete pod my-debian --grace-period=0 --force

$ kubectl create deployment my-debian --image=debian -- "sleep" "infinity"

$ kubectl get all
$ kubectl get deployments
$ kubectl get replicasets
$ kubectl attach my-debian-NNNNNNNNN-NNNNN

$ kubectl exec -ti my-debian-NNNNNNNNN-NNNNN -- bash
Ctrl-D

$ kubectl get deployment my-debian -o yaml
$ kubectl edit deployment my-debian

$ kubectl get pods -o wide

$ kubectl delete deployment my-debian
$ cat my-debian-deployment.yaml
apiVersion: apps/v1
kind: ReplicaSet
#kind: Deployment
metadata:
  name: my-debian
spec:
  selector:
    matchLabels:
      app: my-debian
  replicas: 2
  template:
    metadata:
      labels:
        app: my-debian
    spec:
      containers:
      - name: my-debian
        image: debian
        command: ["/bin/sh"]
        args: ["-c", "while true; do echo hello; sleep 3;done"]
      restartPolicy: Always
$ kubectl apply -f my-debian-deployment.yaml #--dry-run=client #-o yaml
...
$ kubectl delete -f my-debian-deployment.yaml

A namespace for your application

$ kubectl create namespace my-ns

$ kubectl get namespaces

$ ### kubectl create deployment my-webd --image=server.corpX.un:5000/student/webd:latest --replicas=2 -n my-ns

$ ### kubectl delete deployment my-webd -n my-ns

$ cd webd/

$ cat my-webd-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-webd
spec:
  selector:
    matchLabels:
      app: my-webd
  replicas: 2
  template:
    metadata:
      labels:
        app: my-webd
    spec:
      containers:
      - name: my-webd

#        image: server.corpX.un:5000/student/webd
#        image: server.corpX.un:5000/student/webd:ver1.N

#        imagePullPolicy: "Always"

#        image: httpd
#        lifecycle:
#          postStart:
#            exec:
#              command:
#              - /bin/sh
#              - -c
#              - |
#                cd /usr/local/apache2/htdocs/
#                echo "<h1>Hello from apache2 on $(hostname)</h1>" > index.html
#                echo "<img src=img/logo.gif>" >> index.html

#        env:
#        - name: APWEBD_HOSTNAME
#          value: "apwebd.corpX.un"
#        - name: KEYCLOAK_HOSTNAME
#          value: "keycloak.corpX.un"
#        - name: REALM_NAME
#          value: "corpX"

#        livenessProbe:
#          httpGet:
#            port: 80

#        volumeMounts:
#        - mountPath: /usr/local/apache2/htdocs
#          name: htdocs-volume

#        volumeMounts:
#        - name: nfs-volume
#          mountPath: /var/www

#      volumes:
#      - emptyDir: {}
#        name: htdocs-volume

#      volumes:
#      - name: nfs-volume
#        nfs:
#          server: server.corpX.un
#          path: /var/www

#      initContainers:
#      - name: load-htdocs-files
#        image: curlimages/curl
##        command: ['sh', '-c', 'mkdir /mnt/img; curl http://val.bmstu.ru/unix/Media/logo.gif > /mnt/img/logo.gif']
#        command: ["/bin/sh", "-c"]
#        args:
#        - |
#          mkdir /mnt/img; cd /mnt/img
#          curl http://val.bmstu.ru/unix/Media/logo.gif > logo.gif
#          ls -lR /mnt/
#        volumeMounts:
#        - mountPath: /mnt
#          name: htdocs-volume
$ kubectl apply -f my-webd-deployment.yaml -n my-ns #--dry-run=client #-o yaml

$ kubectl get all -n my-ns -o wide 

$ kubectl describe -n my-ns pod/my-webd-NNNNNNNNNN-NNNNN

$ kubectl -n my-ns logs pod/my-webd-NNNNNNNNNN-NNNNN #-c load-htdocs-files

$ kubectl scale deployment my-webd --replicas=3 -n my-ns

$ kubectl delete pod/my-webd-NNNNNNNNNN-NNNNN -n my-ns
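
Scaling can also be automated with a HorizontalPodAutoscaler; a minimal sketch (assumes metrics-server is installed and the pod template sets CPU requests):

$ kubectl autoscale deployment my-webd --min=2 --max=5 --cpu-percent=80 -n my-ns

$ kubectl get hpa -n my-ns

$ ### kubectl delete hpa my-webd -n my-ns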

Deployment revisions

gitlab-runner@server:~$ kubectl -n my-ns rollout history deployment/my-webd
deployment.apps/my-webd
REVISION  CHANGE-CAUSE
1         <none>
...
N         <none>

gitlab-runner@server:~$ kubectl -n my-ns rollout history deployment/my-webd --revision=1
...
    Image:      server.corpX.un:5000/student/webd:ver1.1
...

gitlab-runner@server:~$ kubectl -n my-ns rollout undo deployment/my-webd --to-revision=1

gitlab-runner@server:~$ kubectl -n my-ns rollout history deployment/my-webd
deployment.apps/my-webd
REVISION  CHANGE-CAUSE
2         <none>
...
N+1         <none>
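
The CHANGE-CAUSE column stays <none> unless the deployment carries the kubernetes.io/change-cause annotation; it can be set by hand after each release (the message text is arbitrary):

gitlab-runner@server:~$ kubectl -n my-ns annotate deployment/my-webd kubernetes.io/change-cause="rolled back to ver1.1"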

Service

$ ### kubectl expose deployment my-webd --type=NodePort --port=80 -n my-ns

$ ### kubectl delete svc my-webd -n my-ns

$ cat my-webd-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-webd
spec:
#  type: NodePort
#  type: LoadBalancer
#  loadBalancerIP: 192.168.X.64
  selector:
    app: my-webd
  ports:
  - protocol: TCP
    port: 80
#    nodePort: 30111
$ kubectl apply -f my-webd-service.yaml -n my-ns

$ kubectl logs -l app=my-webd -n my-ns 
(the -f, --tail=2000, and --previous options are also available)

NodePort

$ kubectl get svc my-webd -n my-ns
NAME              TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
my-webd       NodePort   10.102.135.146   <none>        80:NNNNN/TCP   18h

$ kubectl describe svc my-webd -n my-ns

$ curl http://node1,2,3:NNNNN
на "самодельном kubeadm" кластере работает не стабильно
NodePort Minikube
$ minikube service list

$ minikube service my-webd --url -n my-ns
http://192.168.49.2:NNNNN

$ curl http://192.168.49.2:NNNNN

LoadBalancer

MetalLB
$ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml

$ kubectl -n metallb-system get all

$ cat first-pool.yaml
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.X.64/28
  autoAssign: false
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: first-pool-advertisement
  namespace: metallb-system
spec:
  ipAddressPools:
  - first-pool
  interfaces:
  - eth0
$ kubectl apply -f first-pool.yaml

$ ### kubectl delete -f first-pool.yaml && rm first-pool.yaml

$ ### kubectl delete -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml

ClusterIP

kube1# host my-webd.my-ns.svc.cluster.local 169.254.25.10
...10.102.135.146...

server# ssh -p 32222 nodeN

my-openssh-server-NNNNNNNN-NNNNN:~# curl my-webd.my-ns.svc.cluster.local
  OR
my-openssh-server-NNNNNNNN-NNNNN:~# curl my-webd-webd-chart.my-ns.svc.cluster.local
port-forward
node1/kube1# kubectl port-forward -n my-ns --address 0.0.0.0 services/my-webd 1234:80

cmder> kubectl port-forward -n my-ns services/my-webd 1234:80
node1/kube1# kubectl -n my-ns delete pod/my-webd...
kubectl proxy
kube1:~# kubectl proxy --address='0.0.0.0' --accept-hosts='^*$'

cmder> kubectl proxy
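
With the proxy running (port 8001 by default), a service can be reached through the API server proxy path; a sketch for the my-webd service from the examples above:

cmder> curl http://127.0.0.1:8001/api/v1/namespaces/my-ns/services/my-webd:80/proxy/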

Deleting objects

$ kubectl get all -n my-ns

$ kubectl delete -n my-ns -f my-webd-deployment.yaml,my-webd-service.yaml

or

$ kubectl delete namespace my-ns

Ingress

Minikube ingress-nginx-controller

server# cat /etc/bind/corpX.un
...
webd A 192.168.49.2
gitlab-runner@server:~$ minikube addons enable ingress

Baremetal ingress-nginx-controller

server# cat /etc/bind/corpX.un
...
webd            A       192.168.X.202
                A       192.168.X.203
gowebd          CNAME   webd
node1# curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.3.1/deploy/static/provider/baremetal/deploy.yaml | tee ingress-nginx.controller-v1.3.1.baremetal.yaml

node1# cat ingress-nginx.controller-v1.3.1.baremetal.yaml
...
kind: Deployment
...
spec:
...
  replicas: 3    ### insert this (equal to the number of worker nodes)
  template:
...
      terminationGracePeriodSeconds: 300
      hostNetwork: true                    ###insert this
      volumes:
...
node1# kubectl apply -f ingress-nginx.controller-v1.3.1.baremetal.yaml

node1# kubectl get all -n ingress-nginx

node1# ###kubectl delete -A ValidatingWebhookConfiguration ingress-nginx-admission

node1# ###kubectl delete -f ingress-nginx.controller-v1.3.1.baremetal.yaml

Managing the ingress-nginx-controller configuration

master-1:~$ kubectl exec -n ingress-nginx pods/ingress-nginx-controller-<TAB> -- cat /etc/nginx/nginx.conf | tee nginx.conf

master-1:~$ kubectl edit -n ingress-nginx configmaps ingress-nginx-controller
...
data:
  use-forwarded-headers: "true"
...

Final variant with a DaemonSet

node1# diff ingress-nginx.controller-v1.8.2.baremetal.yaml.orig ingress-nginx.controller-v1.8.2.baremetal.yaml
323a324
>   use-forwarded-headers: "true"
391c392,393
< kind: Deployment
---
> #kind: Deployment
> kind: DaemonSet
409,412c411,414
<   strategy:
<     rollingUpdate:
<       maxUnavailable: 1
<     type: RollingUpdate
---
> #  strategy:
> #    rollingUpdate:
> #      maxUnavailable: 1
> #    type: RollingUpdate
501a504
>       hostNetwork: true
node1# kubectl -n ingress-nginx describe service/ingress-nginx-controller
...
Endpoints:                192.168.X.221:80,192.168.X.222:80,192.168.X.223:80
...

ingress example

node1# ### kubectl create ingress my-ingress --class=nginx --rule="webd.corpX.un/*=my-webd:80" -n my-ns

node1# cat my-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-ingress
spec:
  ingressClassName: nginx
#  tls:
#  - hosts:
#    - gowebd.corpX.un
#    secretName: gowebd-tls
  rules:
  - host: webd.corpX.un
    http:
      paths:
      - backend:
          service:
            name: my-webd
            port:
              number: 80
        path: /
        pathType: Prefix
  - host: gowebd.corpX.un
    http:
      paths:
      - backend:
          service:
            name: my-gowebd
            port:
              number: 80
        path: /
        pathType: Prefix
node1# kubectl apply -f my-ingress.yaml -n my-ns


node1# kubectl get ingress -n my-ns
NAME      CLASS   HOSTS                             ADDRESS                         PORTS   AGE
my-ingress   nginx   webd.corpX.un,gowebd.corpX.un   192.168.X.202,192.168.X.203   80      14m
$ curl webd.corpX.un
$ curl gowebd.corpX.un
$ curl https://gowebd.corpX.un #-kv

$ curl http://nodeN/ -H "Host: webd.corpX.un"
$ curl --connect-to "":"":kubeN:443 https://gowebd.corpX.un #-vk

$ kubectl logs -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx -f

node1# ### kubectl delete ingress my-ingress -n my-ns

secrets tls
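
The gowebd.key/gowebd.crt pair used below must already exist; for testing, a self-signed pair can be generated like this (a sketch; -addext requires OpenSSL 1.1.1+):

$ openssl req -x509 -newkey rsa:2048 -nodes -days 365 -keyout gowebd.key -out gowebd.crt -subj "/CN=gowebd.corpX.un" -addext "subjectAltName=DNS:gowebd.corpX.un"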

$ kubectl create secret tls gowebd-tls --key gowebd.key --cert gowebd.crt -n my-ns
    
$ kubectl get secrets -n my-ns

$ kubectl get secret/gowebd-tls -o yaml -n my-ns

$ ###kubectl delete secret/gowebd-tls -n my-ns

Volumes

PersistentVolume and PersistentVolumeClaim

root@node1:~# ssh node2 mkdir /disk2

root@node1:~# ssh node2 touch /disk2/disk2_node2

root@node1:~# kubectl label nodes node2 disk2=yes

root@node1:~# kubectl get nodes --show-labels

root@node1:~# ###kubectl label nodes node2 disk2-

root@node1:~# cat my-debian-deployment.yaml
...
        args: ["-c", "while true; do echo hello; sleep 3;done"]

        volumeMounts:
          - name: my-disk2-volume
            mountPath: /data

#        volumeMounts:
#          - name: data
#            mountPath: /data

      volumes:
        - name: my-disk2-volume
          hostPath:
            path: /disk2/
      nodeSelector:
        disk2: "yes"

#      volumes:
#      - name: data
#        persistentVolumeClaim:
#          claimName: my-ha-pvc-sz64m

      restartPolicy: Always
root@node1:~# kubectl apply -f my-debian-deployment.yaml

root@node1:~# kubectl get all -o wide
root@node1:~# cat my-ha-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-pv-node2-sz-128m-num-001
#  name: my-pv-kube3-keycloak
  labels:
    type: local
spec:
## comment storageClassName for keycloak
  storageClassName: my-ha-sc
  capacity:
    storage: 128Mi
#    storage: 8Gi
  accessModes:
    - ReadWriteMany
#    - ReadWriteOnce
  hostPath:
    path: /disk2
  persistentVolumeReclaimPolicy: Retain
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node2
#          - kube3
root@node1:~# kubectl apply -f my-ha-pv.yaml

root@node1:~# kubectl get persistentvolume
  or
root@node1:~# kubectl get pv

root@kube1:~# ###ssh kube3 'mkdir /disk2/; chmod 777 /disk2/'
...
root@node1:~# ###kubectl delete pv my-pv-<TAB>

root@node1:~# cat my-ha-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-ha-pvc-sz64m
spec:
  storageClassName: my-ha-sc
#  storageClassName: local-path
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 64Mi
root@node1:~# kubectl apply -f my-ha-pvc.yaml

root@node1:~# kubectl get persistentvolumeclaims
  or
root@node1:~# kubectl get pvc
...

root@node1:~# ### kubectl delete pvc my-ha-pvc-sz64m

Dynamic Volume Provisioning

rancher local-path-provisioner

$ kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.26/deploy/local-path-storage.yaml

$ kubectl get sc

$ kubectl -n local-path-storage get all

$ curl https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.26/deploy/local-path-storage.yaml | less
/DEFAULT_PATH_FOR_NON_LISTED_NODES

ssh root@kube1 'mkdir /opt/local-path-provisioner'
ssh root@kube2 'mkdir /opt/local-path-provisioner'
ssh root@kube3 'mkdir /opt/local-path-provisioner'
ssh root@kube1 'chmod 777 /opt/local-path-provisioner'
ssh root@kube2 'chmod 777 /opt/local-path-provisioner'
ssh root@kube3 'chmod 777 /opt/local-path-provisioner'

$ ###kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
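
To check the provisioner by hand, a minimal PVC sketch against the local-path class (local-path supports only ReadWriteOnce, and the PVC stays Pending until a consuming pod is scheduled):

$ cat my-local-path-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-local-path-pvc
spec:
  storageClassName: local-path
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 64Mi
$ kubectl apply -f my-local-path-pvc.yaml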
$ kubectl get pvc -n my-keycloak-ns

$ kubectl get pv

$ ###kubectl -n my-keycloak-ns delete pvc data-my-keycloak-postgresql-0

longhorn

kubeN:~# apt install open-iscsi
$ kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.6.0/deploy/longhorn.yaml

$ kubectl -n longhorn-system get pods -o wide --watch

Setting->General

Pod Deletion Policy When Node is Down: delete-statefulset-pod

Connecting via kubectl proxy

cmder> kubectl proxy

Connecting via ingress

!!! Add an example with authentication !!!

student@server:~/longhorn$ cat ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: longhorn-ingress
  namespace: longhorn-system
spec:
  ingressClassName: nginx
  rules:
  - host: lh.corp13.un
    http:
      paths:
      - backend:
          service:
            name: longhorn-frontend
            port:
              number: 80
        path: /
        pathType: Prefix
Using snapshots
kube1:~# kubectl -n my-keycloak-ns scale --replicas 0 statefulset my-keycloak

kube1:~# kubectl -n my-keycloak-ns scale --replicas 0 statefulset my-keycloak-postgresql
kube1:~# kubectl -n my-keycloak-ns scale --replicas 1 statefulset my-keycloak-postgresql

kube1:~# kubectl -n my-keycloak-ns scale --replicas 2 statefulset my-keycloak
Using backups
Setting -> General -> Backup Target -> nfs://server.corp13.un:/var/www (the Linux NFS client is not needed)

ConfigMap

root@node1:~# cat sshd_config
PermitRootLogin yes
PasswordAuthentication no
ChallengeResponseAuthentication no
UsePAM no
root@node1:~# kubectl create configmap ssh-config --from-file=sshd_config --dry-run=client -o yaml
...

server:~# cat .ssh/id_rsa.pub
...

root@node1:~# cat my-openssh-server-deployment.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ssh-config
data:
  sshd_config: |
    PermitRootLogin yes
    PasswordAuthentication no
    ChallengeResponseAuthentication no
    UsePAM no
  authorized_keys: |
    ssh-rsa AAAAB.....C0zOcZ68= root@server.corpX.un
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-openssh-server
spec:
  selector:
    matchLabels:
      app: my-openssh-server
  template:
    metadata:
      labels:
        app: my-openssh-server
    spec:
      containers:
      - name: my-openssh-server
        image: linuxserver/openssh-server
        command: ["/bin/sh"]
        args: ["-c", "/usr/bin/ssh-keygen -A; usermod -p '*' root; /usr/sbin/sshd.pam -D"]
        ports:
        - containerPort: 22
        volumeMounts:
        - name: ssh-volume
          subPath: sshd_config
          mountPath: /etc/ssh/sshd_config
        - name: ssh-volume
          subPath: authorized_keys
          mountPath: /root/.ssh/authorized_keys
      volumes:
      - name: ssh-volume
        configMap:
          name: ssh-config
---
apiVersion: v1
kind: Service
metadata:
  name: my-openssh-server
spec:
  type: NodePort
  ports:
  - port: 22
    nodePort: 32222
  selector:
    app: my-openssh-server
root@node1:~# kubectl apply -f my-openssh-server-deployment.yaml

root@node1:~# iptables-save | grep 32222

root@node1:~# ###kubectl exec -ti my-openssh-server-NNNNNNNN-NNNNN -- bash

server:~# ssh -p 32222 nodeN
Welcome to OpenSSH Server
my-openssh-server-NNNNNNNN-NNNNN:~# nslookup my-openssh-server.default.svc.cluster.local

Multi-container pod example

gitlab-runner@gate:~/webd$ cat my-webd-ssh-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-webd-ssh
  namespace: my-ns
spec:
  selector:
    matchLabels:
      app: my-webd-ssh
  replicas: 1
  template:
    metadata:
      labels:
        app: my-webd-ssh
    spec:
      containers:
      - name: my-webd
        image: server.corpX.un:5000/student/webd:latest
        volumeMounts:
        - name: html
          mountPath: /var/www
      - name: my-ssh
        image: atmoz/sftp
        args: ["user3:password3:10003"]
        volumeMounts:
        - name: html
          mountPath: /home/user3/www
      volumes:
      - name: html
        emptyDir: {}
...
$ kubectl describe pod my-webd-ssh-NNNNNNNNNN-NNNNN -n my-ns

$ kubectl exec -ti -n my-ns my-webd-ssh-NNNNNNNNNN-NNNNN -c my-ssh -- bash

$ ### kubectl expose deployment my-webd-ssh --type=NodePort --port=80,22 -n my-ns

$ cat my-webd-ssh-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-webd-ssh
  namespace: my-ns
spec:
  type: NodePort
  selector:
    app: my-webd-ssh
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22

Helm

Installing Helm

# wget https://get.helm.sh/helm-v3.9.0-linux-amd64.tar.gz

# tar -zxvf helm-v3.9.0-linux-amd64.tar.gz

# mv linux-amd64/helm /usr/local/bin/helm

# source <(helm completion bash)

Working with ready-made charts

ingress-nginx

$ helm upgrade ingress-nginx --install ingress-nginx \
--set controller.hostNetwork=true,controller.publishService.enabled=false,controller.kind=DaemonSet,controller.config.use-forwarded-headers=true \
--repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace

$ helm list --namespace ingress-nginx
$ helm list -A

$ kubectl get all -n ingress-nginx -o wide

$ helm delete ingress-nginx --namespace ingress-nginx


$ mkdir ingress-nginx; cd ingress-nginx

$ helm template ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx | tee t1.yaml

$ helm show values ingress-nginx --repo https://kubernetes.github.io/ingress-nginx | tee values.yaml.orig

$ cat values.yaml
controller:
  hostNetwork: true
  publishService:
    enabled: false
  kind: DaemonSet
#  config:
#    use-forwarded-headers: true
#    allow-snippet-annotations: true
$ helm template ingress-nginx -f values.yaml --repo https://kubernetes.github.io/ingress-nginx -n ingress-nginx | tee t2.yaml

$ helm upgrade ingress-nginx -i ingress-nginx -f values.yaml --repo https://kubernetes.github.io/ingress-nginx -n ingress-nginx --create-namespace

$ kubectl exec -n ingress-nginx pods/ingress-nginx-controller-<TAB> -- cat /etc/nginx/nginx.conf | tee nginx.conf | grep use_forwarded_headers

$ kubectl -n ingress-nginx describe service/ingress-nginx-controller
...
Endpoints:                192.168.X.221:80,192.168.X.222:80,192.168.X.223:80
...

# kubectl get clusterrole -A | grep -i ingress
# kubectl get clusterrolebindings -A | grep -i ingress
# kubectl get validatingwebhookconfigurations -A | grep -i ingress

Deploying your own application

gitlab-runner@server:~/gowebd-k8s$ helm create webd-chart

$ less webd-chart/templates/deployment.yaml

$ cat webd-chart/Chart.yaml
...
description: A Helm chart WebD for Kubernetes
...
version: 0.1.1
icon: https://val.bmstu.ru/unix/Media/logo.gif
...
appVersion: "latest"
#appVersion: ver1.7   #for vanilla argocd
$ cat webd-chart/values.yaml
...
replicaCount: 2

image:
  repository: server.corpX.un:5000/student/webd
  pullPolicy: Always
...
serviceAccount:
  create: false
...
service:
#  type: NodePort
...
ingress:
  enabled: true
  className: "nginx"
...
  hosts:
    - host: webd.corpX.un
...
#  tls: []
#  tls:
#    - secretName: gowebd-tls
#      hosts:
#        - gowebd.corpX.un
...
#APWEBD_HOSTNAME: "apwebd.corp13.un"
#KEYCLOAK_HOSTNAME: "keycloak.corp13.un"
#REALM_NAME: "corp13"
$ less webd-chart/templates/deployment.yaml
...
          imagePullPolicy: {{ .Values.image.pullPolicy }}
#          env:
#          - name: APWEBD_HOSTNAME
#            value: "{{ .Values.APWEBD_HOSTNAME }}"
#          - name: KEYCLOAK_HOSTNAME
#            value: "{{ .Values.KEYCLOAK_HOSTNAME }}"
#          - name: REALM_NAME
#            value: "{{ .Values.REALM_NAME }}"
...
$ helm lint webd-chart/

$ helm template my-webd webd-chart/ | less

$ helm install my-webd webd-chart/ -n my-ns --create-namespace --wait

$ kubectl describe events -n my-ns | less

$ export HELM_NAMESPACE=my-ns

$ helm list

$ ### helm upgrade my-webd webd-chart/ --set=image.tag=ver1.10

$ helm history my-webd

$ helm rollback my-webd 1
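
The contents of a release can be inspected before or after a rollback with the standard helm get subcommands:

$ helm get values my-webd

$ helm get manifest my-webd | less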

$ helm uninstall my-webd

Working with your own repository

gitlab-runner@server:~/gowebd-k8s$ helm repo add --username student --password NNNNN-NNNNNNNNNNNNNNNNNNN webd http://server.corpX.un/api/v4/projects/N/packages/helm/stable
"webd" has been added to your repositories

gitlab-runner@server:~/gowebd-k8s$ ### helm repo remove webd

gitlab-runner@server:~/gowebd-k8s$ helm repo list

gitlab-runner@server:~/gowebd-k8s$ helm package webd-chart

gitlab-runner@server:~/gowebd-k8s$ tar -tf webd-chart-0.1.1.tgz

gitlab-runner@server:~/gowebd-k8s$ helm plugin install https://github.com/chartmuseum/helm-push

gitlab-runner@server:~/gowebd-k8s$ helm cm-push webd-chart-0.1.1.tgz webd

gitlab-runner@server:~/gowebd-k8s$ rm webd-chart-0.1.1.tgz
kube1:~# helm repo add webd http://server.corpX.un/api/v4/projects/N/packages/helm/stable

kube1:~# helm repo update

kube1:~# helm search repo webd

kube1:~# helm repo update webd

kube1:~# helm install my-webd webd/webd-chart

kube1:~# ###helm uninstall my-webd
kube1:~# mkdir gowebd; cd gowebd

kube1:~/gowebd# ###helm pull webd/webd-chart
kube1:~/gowebd# ###helm pull webd-chart --repo https://server.corp13.un/api/v4/projects/1/packages/helm/stable

kube1:~/gowebd# helm show values webd-chart --repo https://server.corp13.un/api/v4/projects/1/packages/helm/stable | tee values.yaml.orig

kube1:~/gowebd# cat values.yaml
replicaCount: 3
image:
  tag: "ver1.1"
#REALM_NAME: "corp"
kube1:~/gowebd# helm upgrade my-webd -i webd-chart -f values.yaml -n my-ns --create-namespace --repo https://server.corp13.un/api/v4/projects/1/packages/helm/stable

$ curl http://kubeN -H "Host: gowebd.corpX.un"

kube1:~/gowebd# ###helm uninstall my-webd -n my-ns

Working with public repositories

gitlab-runner kubernetes

gitlab-runner@server:~$ helm repo add gitlab https://charts.gitlab.io

gitlab-runner@server:~$ helm repo list

gitlab-runner@server:~$ helm search repo -l gitlab/gitlab-runner

gitlab-runner@server:~$ helm show values gitlab/gitlab-runner --version 0.56.0 | tee values.yaml

gitlab-runner@server:~$ diff values.yaml values.yaml.orig
...
gitlabUrl: http://server.corpX.un/
...
runnerRegistrationToken: "NNNNNNNNNNNNNNNNNNNNNNNN"
...
148,149c142
<   create: true
---
>   create: false
325d317
<         privileged = true
432c424
<   allowPrivilegeEscalation: true
---
>   allowPrivilegeEscalation: false
435c427
<   privileged: true
---
>   privileged: false
gitlab-runner@server:~$ helm upgrade -i gitlab-runner gitlab/gitlab-runner -f values.yaml -n gitlab-runner --create-namespace --version 0.56.0

gitlab-runner@server:~$ kubectl get all -n gitlab-runner
SSL/TLS
# kubectl -n gitlab-runner create configmap wild-crt --from-file=wild.crt

# cat values.yaml
...
gitlabUrl: https://server.corpX.un/
...
  config: |
    [[runners]]
      tls-ca-file = "/mnt/wild.crt"
      [runners.kubernetes]      
...
#volumeMounts: []
volumeMounts:
  - name: wild-crt
    subPath: wild.crt
    mountPath: /mnt/wild.crt
    
#volumes: []
volumes:
  - name: wild-crt
    configMap:
      name: wild-crt

Kubernetes Dashboard

$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml

$ cat dashboard-user-role.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
$ kubectl apply -f dashboard-user-role.yaml

$ kubectl -n kubernetes-dashboard create token admin-user

$ kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d ; echo

cmder$ kubectl proxy

Debugging and troubleshooting

Debugging etcd

kubeN:~# more /etc/kubernetes/manifests/kube-apiserver.yaml

kubeN:~# etcdctl member list -w table \
  --endpoints=https://kube1:2379 \
  --cacert=/etc/ssl/etcd/ssl/ca.pem \
  --cert=/etc/ssl/etcd/ssl/node-kube1.pem \
  --key=/etc/ssl/etcd/ssl/node-kube1-key.pem

kubeN:~# etcdctl endpoint status -w table \
  --endpoints=https://kube1:2379,https://kube2:2379,https://kube3:2379 \
  --cacert=/etc/ssl/etcd/ssl/ca.pem \
  --cert=/etc/ssl/etcd/ssl/node-kube1.pem \
  --key=/etc/ssl/etcd/ssl/node-kube1-key.pem
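
The same TLS flags can be reused to take an etcd backup; the snapshot path below is an arbitrary example:

kubeN:~# etcdctl snapshot save /root/etcd-snapshot.db \
  --endpoints=https://kube1:2379 \
  --cacert=/etc/ssl/etcd/ssl/ca.pem \
  --cert=/etc/ssl/etcd/ssl/node-kube1.pem \
  --key=/etc/ssl/etcd/ssl/node-kube1-key.pem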

Additional material

Installing kubelet, kubeadm, kubectl on Ubuntu 20

mkdir /etc/apt/keyrings

curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list

apt update && apt install -y kubeadm=1.28.1-1.1 kubelet=1.28.1-1.1 kubectl=1.28.1-1.1

Use .kube/config Client certs in curl

cat ~/.kube/config | yq -r '.clusters[0].cluster."certificate-authority-data"' | base64 -d - > ~/.kube/ca.pem 
cat ~/.kube/config | yq -r '.users[0].user."client-certificate-data"' | base64 -d - > ~/.kube/user.pem
cat ~/.kube/config | yq -r '.users[0].user."client-key-data"' | base64 -d - > ~/.kube/user-key.pem

SERVER_URL=$(cat ~/.kube/config | yq -r .clusters[0].cluster.server)

curl --cacert ~/.kube/ca.pem --cert ~/.kube/user.pem --key ~/.kube/user-key.pem -X GET  ${SERVER_URL}/api/v1/namespaces/default/pods/

bare-metal minikube

student@node2:~$ sudo apt install conntrack

https://computingforgeeks.com/install-mirantis-cri-dockerd-as-docker-engine-shim-for-kubernetes/
...

wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.2/crictl-v1.24.2-linux-amd64.tar.gz
...

student@node2:~$ minikube start --driver=none --insecure-registry "server.corpX.un:5000"

minikube dashboard

student@node1:~$ minikube dashboard &
...
Opening http://127.0.0.1:NNNNN/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser
...
/home/mobaxterm> ssh -L NNNNN:localhost:NNNNN student@192.168.X.10
The same link now works on the Windows host system

Connecting to minikube from another system

student@node1:~$ tar -cvzf kube-config.tar.gz .kube/config .minikube/ca.crt .minikube/profiles/minikube

gitlab-runner@server:~$ scp student@node1:kube-config.tar.gz .

gitlab-runner@server:~$ tar -xvf kube-config.tar.gz

gitlab-runner@server:~$ cat .kube/config
...
    certificate-authority: /home/gitlab-runner/.minikube/ca.crt
...
    client-certificate: /home/gitlab-runner/.minikube/profiles/minikube/client.crt
    client-key: /home/gitlab-runner/.minikube/profiles/minikube/client.key
...

kompose

root@gate:~# curl -L https://github.com/kubernetes/kompose/releases/download/v1.26.0/kompose-linux-amd64 -o kompose
root@gate:~# chmod +x kompose
root@gate:~# sudo mv ./kompose /usr/local/bin/kompose
gitlab-runner@gate:~/webd$ kompose convert
gitlab-runner@gate:~/webd$ ls *yaml
gitlab-runner@gate:~/webd$ kubectl apply -f sftp-deployment.yaml,vol1-persistentvolumeclaim.yaml,webd-service.yaml,sftp-service.yaml,webd-deployment.yaml
gitlab-runner@gate:~/webd$ kubectl get all