# curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
# chmod +x kubectl
# mv kubectl /usr/local/bin/

cmder$ curl -LO "https://dl.k8s.io/release/v1.29.0/bin/windows/amd64/kubectl.exe"
cmder$ mv kubectl.exe /usr/bin

mkdir ~/.kube/
scp root@192.168.X.2N1:.kube/config ~/.kube/
cat ~/.kube/config
...
    server: https://192.168.X.2N1:6443
...
kubectl version
kubectl get all -o wide --all-namespaces
kubectl get all -o wide -A

kube1:~# less /etc/bash_completion.d/kubectl.sh
or
$ cat ~/.profile

#...
source <(kubectl completion bash)
alias k=kubectl
complete -F __start_kubectl k
#...

gitlab-runner@server:~$ scp root@kube1:.kube/config .kube/config_kube1
gitlab-runner@server:~$ cat .kube/config_kube1
...
    .kube/config_kube1
...
gitlab-runner@server:~$ export KUBECONFIG=~/.kube/config_kube1
gitlab-runner@server:~$ kubectl get nodes

root@server:~# apt install -y curl wget apt-transport-https
root@server:~# wget https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
root@server:~# mv minikube-linux-amd64 /usr/local/bin/minikube
root@server:~# chmod +x /usr/local/bin/minikube

gitlab-runner@server:~$ time minikube start --driver=docker --insecure-registry "server.corpX.un:5000"
real    41m8.320s
...
gitlab-runner@server:~$ minikube status
gitlab-runner@server:~$ minikube ip

gitlab-runner@server:~$ minikube kubectl -- get pods -A
gitlab-runner@server:~$ cat ~/.profile

#...
# does not work in gitlab-ci
alias kubectl='minikube kubectl --'
#...
gitlab-runner@server:~$ kubectl get pods -A
or
# cp -v /home/gitlab-runner/.minikube/cache/linux/amd64/v*/kubectl /usr/local/bin/
or

gitlab-runner@server:~$ minikube addons list
gitlab-runner@server:~$ minikube addons configure registry-creds
...
Do you want to enable Docker Registry? [y/n]: y
-- Enter docker registry server url: http://server.corpX.un:5000
-- Enter docker registry username: student
-- Enter docker registry password:
...
gitlab-runner@server:~$ minikube addons enable registry-creds

gitlab-runner@server:~$ ###minikube stop
gitlab-runner@server:~$ ###minikube delete
gitlab-runner@server:~$ ###rm -rv .minikube/
gitlab-runner@server:~$ ###minikube start

node1# ssh-keygen
node1# ssh-copy-id node2
node1# ssh-copy-id node3

node1# bash -c '
swapoff -a
ssh node2 swapoff -a
ssh node3 swapoff -a
'

node1# bash -c '
sed -i"" -e "/swap/s/^/#/" /etc/fstab
ssh node2 sed -i"" -e "/swap/s/^/#/" /etc/fstab
ssh node3 sed -i"" -e "/swap/s/^/#/" /etc/fstab
'
node1_2_3# apt-get install -y docker.io
Check: if the output of
node1# containerd config dump | grep SystemdCgroup
is not equal to:
           SystemdCgroup = true
then run the following four commands:
bash -c 'mkdir -p /etc/containerd/
ssh node2 mkdir -p /etc/containerd/
ssh node3 mkdir -p /etc/containerd/
'
bash -c 'containerd config default > /etc/containerd/config.toml
ssh node2 "containerd config default > /etc/containerd/config.toml"
ssh node3 "containerd config default > /etc/containerd/config.toml"
'
bash -c 'sed -i "s/SystemdCgroup \= false/SystemdCgroup \= true/g" /etc/containerd/config.toml
ssh node2 sed -i \"s/SystemdCgroup \= false/SystemdCgroup \= true/g\" /etc/containerd/config.toml
ssh node3 sed -i \"s/SystemdCgroup \= false/SystemdCgroup \= true/g\" /etc/containerd/config.toml
'
bash -c 'service containerd restart
ssh node2 service containerd restart
ssh node3 service containerd restart
'
bash -c 'mkdir -p /etc/apt/keyrings
ssh node2 mkdir -p /etc/apt/keyrings
ssh node3 mkdir -p /etc/apt/keyrings
'

bash -c 'curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
ssh node2 "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg"
ssh node3 "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg"
'

bash -c 'echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list
ssh node2 echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /" \| tee /etc/apt/sources.list.d/kubernetes.list
ssh node3 echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /" \| tee /etc/apt/sources.list.d/kubernetes.list
'

bash -c 'apt-get update && apt-get install -y kubelet kubeadm kubectl
ssh node2 "apt-get update && apt-get install -y kubelet kubeadm kubectl"
ssh node3 "apt-get update && apt-get install -y kubelet kubeadm kubectl"
'

Execution time: about 2 minutes

root@node1:~# kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.X.201

Execution time: about 3 minutes

root@node1:~# mkdir -p $HOME/.kube
root@node1:~# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
root@nodeN:~# lsmod | grep br_netfilter
root@node1:~# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
root@node1:~# kubectl get pod --all-namespaces -o wide
root@node1:~# kubectl get --raw='/readyz?verbose'
root@node2_3:~# curl -k https://node1:6443/livez?verbose
root@node2_3:~# kubeadm join 192.168.X.201:6443 --token NNNNNNNNNNNNNNNNNNNN \
        --discovery-token-ca-cert-hash sha256:NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN
root@node2_3:~# curl -sSL http://127.0.0.1:10248/healthz
root@node1:~# kubeadm token list
root@node1:~# kubeadm token create --print-join-command
root@node1:~# kubectl cluster-info
root@node1:~# kubectl get nodes -o wide
root@node1:~# kubectl describe node node2

$ kubectl cordon kube3
$ time kubectl drain kube3 #--ignore-daemonsets --delete-emptydir-data --force
$ kubectl delete node kube3

node1# bash -c '
kubeadm reset
ssh node2 kubeadm reset
ssh node3 kubeadm reset
'

root@node1:~# mkdir -p /etc/containerd/
root@node1:~# ###containerd config default > /etc/containerd/config.toml
root@node1:~# cat /etc/containerd/config.toml
...
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."server.corpX.un:5000"]
          endpoint = ["http://server.corpX.un:5000"]
...
node1# bash -c '
ssh node2 mkdir -p /etc/containerd/
ssh node3 mkdir -p /etc/containerd/
scp /etc/containerd/config.toml node2:/etc/containerd/config.toml
scp /etc/containerd/config.toml node3:/etc/containerd/config.toml
systemctl restart containerd
ssh node2 systemctl restart containerd
ssh node3 systemctl restart containerd
'

root@nodeN:~# containerd config dump | less

# mkdir -p /etc/containerd/certs.d/server.corpX.un:5000/
# cat /etc/containerd/certs.d/server.corpX.un:5000/hosts.toml

[host."http://server.corpX.un:5000"]
  capabilities = ["pull", "resolve", "push"]
  skip_verify = true
# systemctl restart containerd.service
Verification

root@nodeN:~# crictl -r unix:///run/containerd/containerd.sock pull server.corpX.un:5000/student/gowebd
root@kubeN:~# crictl pull server.corpX.un:5000/student/pywebd2

(venv1) server# ssh-keygen
(venv1) server# ssh-copy-id kube1;ssh-copy-id kube2;ssh-copy-id kube3;ssh-copy-id kube4;
(venv1) server# git clone https://github.com/kubernetes-sigs/kubespray
(venv1) server# cd kubespray/
(venv1) server:~/kubespray# git tag -l
(venv1) server:~/kubespray# git checkout tags/v2.26.0
or
(venv1) server:~/kubespray# git checkout tags/v2.27.0
(venv1) server:~/kubespray# time pip3 install -r requirements.txt
(venv1) server:~/kubespray# cp -rvfpT inventory/sample inventory/mycluster
(venv1) server:~/kubespray# cat inventory/mycluster/hosts.yaml
all:
  hosts:
    kube1:
    kube2:
    kube3:
    kube4:
  children:
    kube_control_plane:
      hosts:
        kube1:
        kube2:
    kube_node:
      hosts:
        kube1:
        kube2:
        kube3:
    etcd:
      hosts:
        kube1:
        kube2:
        kube3:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
    calico_rr:
      hosts: {}
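If the kube1..kube4 names are not resolvable from the Ansible host, each entry in hosts.yaml can carry explicit connection variables; a sketch with hypothetical addresses:

all:
  hosts:
    kube1:
      ansible_host: 192.168.X.221
      ip: 192.168.X.221
      access_ip: 192.168.X.221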
(venv1) server:~/kubespray# ansible all -m ping -i inventory/mycluster/hosts.yaml
~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml
real    45m31.796s

kube1# less ~/.kube/config

~/kubespray# ###time ansible-playbook -i inventory/mycluster/hosts.yaml reset.yml
real    7m31.796s
~/kubespray# cat inventory/mycluster/hosts.yaml
all:
  hosts:
...
    kube4:
...
    kube_node:
      hosts:
...
        kube4:
...
(venv1) server:~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml
real    6m31.562s

~/kubespray# ###time ansible-playbook -i inventory/mycluster/hosts.yaml --limit=kube4 scale.yml
real    17m37.459s

$ kubectl get nodes -o wide
~/kubespray# cat inventory/mycluster/group_vars/all/containerd.yml
...
containerd_insecure_registries:
  "server.corpX.un:5000": "http://server.corpX.un:5000"
containerd_registry_auth:
  - registry: server.corpX.un:5000
    username: student
    password: Pa$$w0rd
...
~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml
user    46m37.151s

# less /etc/containerd/config.toml

~/kubespray# cat inventory/mycluster/group_vars/k8s_cluster/addons.yml

...
helm_enabled: true
...
ingress_nginx_enabled: true
ingress_nginx_host_network: true
...
$ kubectl api-resources
$ ###kubectl run -ti --rm my-debian --image=debian --overrides='{"spec": { "nodeSelector": {"kubernetes.io/hostname": "kube4"}}}'
$ kubectl run my-debian --image=debian -- "sleep" "60"
$ kubectl get pods
kubeN# crictl ps | grep debi
kubeN# crictl images
nodeN# ctr ns ls
nodeN# ctr -n=k8s.io image ls | grep debi
$ kubectl delete pod my-debian
$ ###kubectl delete pod my-debian --grace-period=0 --force
$ kubectl create deployment my-debian --image=debian -- "sleep" "infinity"
$ kubectl get all
$ kubectl get deployments
$ kubectl get replicasets
$ kubectl attach my-debian-NNNNNNNNN-NNNNN
$ kubectl exec -ti my-debian-NNNNNNNNN-NNNNN -- bash
Ctrl-D
$ kubectl get deployment my-debian -o yaml
$ kubectl edit deployment my-debian
$ kubectl get pods -o wide
$ kubectl delete deployment my-debian
$ cat my-debian-deployment.yaml
apiVersion: apps/v1
kind: ReplicaSet
#kind: Deployment
metadata:
  name: my-debian
spec:
  selector:
    matchLabels:
      app: my-debian
  replicas: 2
  template:
    metadata:
      labels:
        app: my-debian
    spec:
      containers:
      - name: my-debian
        image: debian
        command: ["/bin/sh"]
        args: ["-c", "while :;do echo -n random-value:;od -A n -t d -N 1 /dev/urandom;sleep 5; done"]
        resources:
          requests:
            memory: "64Mi"
            cpu: "250m"
          limits:
            memory: "128Mi"
            cpu: "500m"
      restartPolicy: Always
$ kubectl apply -f my-debian-deployment.yaml #--dry-run=client #-o yaml
$ kubectl logs -l app=my-debian -f
...
$ kubectl delete -f my-debian-deployment.yaml

$ kubectl create namespace my-ns
$ kubectl get namespaces

$ ### kubectl create deployment my-webd --image=server.corpX.un:5000/student/webd:latest --replicas=2 -n my-ns
$ ### kubectl delete deployment my-webd -n my-ns

$ mkdir ??webd-k8s/; cd $_
$ cat my-webd-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-webd
#  annotations:
#    kubernetes.io/change-cause: "update to ver1.2"
spec:
  selector:
    matchLabels:
      app: my-webd
  replicas: 2
  template:
    metadata:
      labels:
        app: my-webd
    spec:
      containers:
      - name: my-webd
#        image: server.corpX.un:5000/student/webd
#        image: server.corpX.un:5000/student/webd:ver1.N
#        image: httpd
#        args: ["gunicorn", "app:app", "--bind", "0.0.0.0:8000", "-k", "uvicorn.workers.UvicornWorker"]
#        imagePullPolicy: "Always"
#        lifecycle:
#          postStart:
#            exec:
#              command:
#              - /bin/sh
#              - -c
#              - |
#                #test -f /usr/local/apache2/htdocs/index.html && exit 0
#                mkdir -p /usr/local/apache2/htdocs/
#                cd /usr/local/apache2/htdocs/
#                echo "<h1>Hello from apache2 on $(hostname) at $(date)</h1>" > index.html
#                echo "<img src=img/logo.gif>" >> index.html
#        env:
#        - name: PYWEBD_DOC_ROOT
#          value: "/usr/local/apache2/htdocs/"
#        - name: PYWEBD_PORT
#          value: "4080"
#        - name: APWEBD_HOSTNAME
#          value: "apwebd.corpX.un"
#        - name: KEYCLOAK_HOSTNAME
#          value: "keycloak.corpX.un"
#        - name: REALM_NAME
#          value: "corpX"
#        livenessProbe:
#          httpGet:
#            port: 80
#            #scheme: HTTPS
#        volumeMounts:
#        - name: htdocs-volume
#          mountPath: /usr/local/apache2/htdocs
#        volumeMounts:
#        - name: nfs-volume
#          mountPath: /var/www
#      volumes:
#      - name: htdocs-volume
#        emptyDir: {}
#      volumes:
#      - name: nfs-volume
#        nfs:
#          server: server.corpX.un
#          path: /var/www
#      initContainers:
#      - name: load-htdocs-files
#        image: curlimages/curl
##        command: ['sh', '-c', 'mkdir /mnt/img; curl http://val.bmstu.ru/unix/Media/logo.gif > /mnt/img/logo.gif']
#        command: ["/bin/sh", "-c"]
#        args:
#        - |
#          test -d /mnt/img/ && exit 0
#          mkdir /mnt/img; cd /mnt/img
#          curl http://val.bmstu.ru/unix/Media/logo.gif > logo.gif
#          ls -lR /mnt/
#        volumeMounts:
#        - mountPath: /mnt
#          name: htdocs-volume
$ kubectl apply -f my-webd-deployment.yaml -n my-ns #--dry-run=client #-o yaml
$ kubectl get all -n my-ns -o wide
$ kubectl describe -n my-ns pod/my-webd-NNNNNNNNNN-NNNNN
$ kubectl -n my-ns logs pod/my-webd-NNNNNNNNNN-NNNNN #-c load-htdocs-files
$ kubectl logs -l app=my-webd -n my-ns    (the -f, --tail=2000 and --previous options are also available)
$ kubectl scale deployment my-webd --replicas=3 -n my-ns
$ kubectl delete pod/my-webd-NNNNNNNNNN-NNNNN -n my-ns

$ ###kubectl rollout pause deployment my-webd-dep -n my-ns
$ ###kubectl set image deployment/my-webd-dep my-webd-con=server.corpX.un:5000/student/gowebd:ver1.2 -n my-ns
$ ###kubectl rollout resume deployment my-webd-dep -n my-ns
$ ###kubectl rollout status deployment/my-webd-dep -n my-ns
$ kubectl rollout history deployment/my-webd -n my-ns

REVISION  CHANGE-CAUSE
1         <none>
...
N         update to ver1.2
$ kubectl rollout history deployment/my-webd --revision=1 -n my-ns
...
    Image:      server.corpX.un:5000/student/webd:ver1.1
...
$ kubectl rollout undo deployment/my-webd --to-revision=1 -n my-ns
$ kubectl annotate deployment/my-webd kubernetes.io/change-cause="revert to ver1.1" -n my-ns
$ kubectl rollout history deployment/my-webd -n my-ns

REVISION  CHANGE-CAUSE
2         update to ver1.2
...
N+1       revert to ver1.1

$ ### kubectl expose deployment my-webd --type=NodePort --port=80 -n my-ns
$ ### kubectl delete svc my-webd -n my-ns

$ cat my-webd-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-webd
spec:
#  type: NodePort
#  type: LoadBalancer
#  loadBalancerIP: 192.168.X.64
  selector:
    app: my-webd
  ports:
  - protocol: TCP
    port: 80
#    targetPort: 4080
#    nodePort: 30111
$ kubectl apply -f my-webd-service.yaml -n my-ns
$ kubectl describe svc my-webd -n my-ns
$ kubectl get endpoints -n my-ns
or
$ kubectl get endpointslice -n my-ns

$ kubectl get svc my-webd -n my-ns
NAME          TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
my-webd-svc   NodePort   10.102.135.146   <none>        80:NNNNN/TCP   18h

$ curl http://kube1,2,3:NNNNN

$ minikube service list
$ minikube service my-webd --url -n my-ns
http://192.168.49.2:NNNNN
$ curl http://192.168.49.2:NNNNN

$ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml
$ kubectl -n metallb-system get all

$ cat first-pool.yaml

---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.X.64/28
  autoAssign: false
#  autoAssign: true
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: first-pool-advertisement
  namespace: metallb-system
spec:
  ipAddressPools:
  - first-pool
  interfaces:
  - eth0

$ kubectl apply -f first-pool.yaml
...
$ kubectl get svc my-webd -n my-ns
NAME      TYPE           CLUSTER-IP     EXTERNAL-IP    PORT(S)        AGE
my-webd   LoadBalancer   10.233.23.29   192.168.X.64   80:NNNNN/TCP   50s

$ #kubectl delete -f first-pool.yaml && rm first-pool.yaml
$ #kubectl delete -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml

kube1# host my-webd.my-ns.svc.cluster.local 169.254.25.10
kube1# curl my-webd.my-ns.svc.cluster.local

node1/kube1# kubectl port-forward -n my-ns --address 0.0.0.0 services/my-webd 1234:80

cmder> kubectl port-forward -n my-ns services/my-webd 1234:80
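Since first-pool is created with autoAssign: false, a Service only receives an address from it when the address is requested explicitly, either via loadBalancerIP as in my-webd-service.yaml above, or via the standard MetalLB pool annotation; a sketch:

apiVersion: v1
kind: Service
metadata:
  name: my-webd
  annotations:
    metallb.universe.tf/address-pool: first-pool
spec:
  type: LoadBalancer
...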
node1/kube1# kubectl -n my-ns delete pod/my-webd...
kube1:~# kubectl proxy --address='0.0.0.0' --accept-hosts='^*$'

cmder> kubectl proxy

$ kubectl get all -n my-ns
$ kubectl delete -n my-ns -f my-webd-deployment.yaml,my-webd-service.yaml
or
$ kubectl delete namespace my-ns
server# cat /etc/bind/corpX.un
...
webd            A       192.168.49.2
gitlab-runner@server:~$ minikube addons enable ingress
server# cat /etc/bind/corpX.un
...
webd            A       192.168.X.202
                A       192.168.X.203
gowebd          CNAME   webd
node1# curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.3.1/deploy/static/provider/baremetal/deploy.yaml | tee ingress-nginx.controller-v1.3.1.baremetal.yaml
node1# cat ingress-nginx.controller-v1.3.1.baremetal.yaml
...
kind: Deployment
...
spec:
...
  replicas: 3    ### insert this (equal to the number of worker nodes)
  template:
...
      terminationGracePeriodSeconds: 300
      hostNetwork: true                    ###insert this
      volumes:
...
node1# kubectl apply -f ingress-nginx.controller-v1.3.1.baremetal.yaml
node1# kubectl get all -n ingress-nginx
node1# ###kubectl delete -A ValidatingWebhookConfiguration ingress-nginx-admission
node1# ###kubectl delete -f ingress-nginx.controller-v1.3.1.baremetal.yaml

kube1:~# mkdir -p ingress-nginx; cd $_
kube1:~/ingress-nginx# curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.12.0/deploy/static/provider/baremetal/deploy.yaml | tee ingress-nginx.controller-v1.12.0.baremetal.yaml
kube1:~/ingress-nginx# cat ingress-nginx.controller-v1.12.0.baremetal.yaml
...
apiVersion: v1
#data: null
data:
  allow-snippet-annotations: "true"
  use-forwarded-headers: "true"
kind: ConfigMap
...
#kind: Deployment
kind: DaemonSet
...
#  strategy:
#    rollingUpdate:
#      maxUnavailable: 1
#    type: RollingUpdate
...
      hostNetwork: true                    ### insert this
      terminationGracePeriodSeconds: 300
      volumes:
...
kube1:~/ingress-nginx# kubectl apply -f ingress-nginx.controller-v1.12.0.baremetal.yaml
kube1:~/ingress-nginx# kubectl -n ingress-nginx get pods -o wide
kube1:~/ingress-nginx# kubectl -n ingress-nginx describe service/ingress-nginx-controller

...
Endpoints:                192.168.X.221:80,192.168.X.222:80,192.168.X.223:80
...

kube1:~/ingress-nginx# ###kubectl delete -f ingress-nginx.controller-v1.12.0.baremetal.yaml

master-1:~$ kubectl exec -n ingress-nginx pods/ingress-nginx-controller-<TAB> -- cat /etc/nginx/nginx.conf | tee nginx.conf

master-1:~$ kubectl edit -n ingress-nginx configmaps ingress-nginx-controller

...
data:
  use-forwarded-headers: "true"
...

kube1# ### kubectl create ingress my-ingress --class=nginx --rule="webd.corpX.un/*=my-webd:80" -n my-ns

kube1# cat my-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-ingress
#  annotations:
#    nginx.ingress.kubernetes.io/canary: "true"
#    nginx.ingress.kubernetes.io/canary-weight: "30"
spec:
  ingressClassName: nginx
#  tls:
#  - hosts:
#    - gowebd.corpX.un
#    secretName: gowebd-tls
  rules:
  - host: webd.corpX.un
    http:
      paths:
      - backend:
          service:
            name: my-webd
            port:
              number: 4080
        path: /
        pathType: Prefix
  - host: gowebd.corpX.un
    http:
      paths:
      - backend:
          service:
            name: my-gowebd
            port:
              number: 80
        path: /
        pathType: Prefix
kube1# kubectl apply -f my-ingress.yaml -n my-ns

kube1# kubectl get ingress -n my-ns
NAME      CLASS   HOSTS                           ADDRESS                       PORTS   AGE
my-webd   nginx   webd.corpX.un,gowebd.corpX.un   192.168.X.202,192.168.X.203   80      14m

$ curl webd.corpX.un
$ curl gowebd.corpX.un
$ curl https://gowebd.corpX.un #-kv

$ curl http://nodeN/ -H "Host: webd.corpX.un"
$ curl --connect-to "":"":kubeN:443 https://gowebd.corpX.un #-vk

$ kubectl logs -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx -f

kube1# ### kubectl delete ingress my-ingress -n my-ns
$ kubectl create secret tls gowebd-tls --key gowebd.key --cert gowebd.crt -n my-ns
    
$ kubectl get secrets -n my-ns
$ kubectl get secret/gowebd-tls -o yaml -n my-ns
$ ###kubectl delete secret/gowebd-tls -n my-ns
View the link:
student@debian:~/gowebd-k8s$ kubectl -n my-ns get ingress -o yaml | less

View the ACME solver pod:
student@debian:~/gowebd-k8s$ kubectl -n my-ns get pods
NAME                        READY   STATUS    RESTARTS   AGE
cm-acme-http-solver-5j2pr   1/1     Running   0          28s
my-webd-78ffd6cc5f-4qplt    1/1     Running   0          4d14h
my-webd-78ffd6cc5f-zpcsh    1/1     Running   0          4d14h

kube1# kubectl label nodes kube3 htdocs-node=yes
kube1# kubectl get nodes --show-labels

kube1:~/pywebd-k8s# cat my-webd-deployment.yaml
...
        volumeMounts:
        - name: htdocs-volume
          mountPath: /usr/local/apache2/htdocs
#        lifecycle:
#        ...
      volumes:
      - name: htdocs-volume
        hostPath:
          path: /var/www/
      nodeSelector:
        htdocs-node: "yes"
#      initContainers:
#      ...
kube1:~/pv# cat my-ha-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-pv-kube3-sz-128m-num-001
#  name: my-pv-kube3-keycloak
#  labels:
#    type: local
spec:
## comment storageClassName for keycloak
  storageClassName: my-ha-sc
  capacity:
    storage: 128Mi
#    storage: 8Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
#    path: /disk2
    path: /disk2/dir1
  persistentVolumeReclaimPolicy: Retain
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - kube3
---
#...
kube1:~/pv# kubectl apply -f my-ha-pv.yaml
kube1# kubectl get pv
kube1# kubectl delete pv my-pv-kube3-sz-128m-num-001
kube3# mkdir -p /disk2/dir{0..3}
kube3# chmod 777 -R /disk2/
kube3# find /disk2/
kube3# ###rm -rf /disk2/
kube1:~/pv# cat my-ha-pv-chart/Chart.yaml
apiVersion: v2
name: my-ha-pv-chart
version: 0.1.0
kube1:~/pv# cat my-ha-pv-chart/values.yaml
volume_names:
  - "dir1"
  - "dir2"
  - "dir3"
numVolumes: "3"
kube1:~/pv# cat my-ha-pv-chart/templates/my-ha-pv.yaml
{{ range .Values.volume_names }}
{{/* range $k, $v := until (atoi .Values.numVolumes) */}}
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-pv-sz-128m-num-{{ . }}
spec:
  storageClassName: my-ha-sc
  capacity:
    storage: 128Mi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /disk2/{{ . }}/
{{/*    path: /disk2/dir{{ $v }}/ */}}
  persistentVolumeReclaimPolicy: Retain
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - kube3
{{ end }}
kube1:~/pv# helm template my-ha-pv-chart my-ha-pv-chart/
kube1:~/pv# helm install my-ha-pv-chart my-ha-pv-chart/
kube1# kubectl get pv
kube1:~/pv# ###helm uninstall my-ha-pv-chart
kube1:~/pywebd-k8s# cat my-webd-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-webd-pvc
spec:
  storageClassName: my-ha-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 64Mi
kube1:~/pywebd-k8s# kubectl apply -f my-webd-pvc.yaml -n my-ns
kube1:~/pywebd-k8s# kubectl get pvc -n my-ns

kube1:~/pywebd-k8s# cat my-webd-deployment.yaml
...
        volumeMounts:
        - name: htdocs-volume
          mountPath: /usr/local/apache2/htdocs
        lifecycle:
        ...
      volumes:
      - name: htdocs-volume
        persistentVolumeClaim:
          claimName: my-webd-pvc
      initContainers:
      ...
kube3# find /disk2
$ kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.26/deploy/local-path-storage.yaml
$ kubectl get sc
$ kubectl -n local-path-storage get all
$ curl https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.26/deploy/local-path-storage.yaml | less
/DEFAULT_PATH_FOR_NON_LISTED_NODES
ssh root@kube1 'mkdir /opt/local-path-provisioner'
ssh root@kube2 'mkdir /opt/local-path-provisioner'
ssh root@kube3 'mkdir /opt/local-path-provisioner'
ssh root@kube1 'chmod 777 /opt/local-path-provisioner'
ssh root@kube2 'chmod 777 /opt/local-path-provisioner'
ssh root@kube3 'chmod 777 /opt/local-path-provisioner'
$ ###kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
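A quick way to check the provisioner is a throwaway claim bound to the local-path class (a sketch; the claim name is arbitrary, and with the default WaitForFirstConsumer binding mode it stays Pending until a Pod uses it):

$ cat test-local-path-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-local-path-pvc
spec:
  storageClassName: local-path
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 128Mi
$ kubectl apply -f test-local-path-pvc.yaml
$ kubectl get pvc test-local-path-pvc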
$ kubectl get pvc -n my-keycloak-ns
$ kubectl get pv
$ ###kubectl -n my-keycloak-ns delete pvc data-my-keycloak-postgresql-0

kubeN:~# apt install open-iscsi

(venv1) server:~# ansible all -f 4 -m apt -a 'pkg=open-iscsi state=present update_cache=true' -i /root/kubespray/inventory/mycluster/hosts.yaml

$ kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.6.0/deploy/longhorn.yaml
$ kubectl -n longhorn-system get pods -o wide --watch

Setting -> General -> Pod Deletion Policy When Node is Down: delete-statefulset-pod

Access via kubectl proxy
cmder> kubectl proxy
Access via ingress
!!! TODO: add an example with authentication !!!
student@server:~/longhorn$ cat ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: longhorn-ingress
  namespace: longhorn-system
spec:
  ingressClassName: nginx
  rules:
  - host: lh.corp13.un
    http:
      paths:
      - backend:
          service:
            name: longhorn-frontend
            port:
              number: 80
        path: /
        pathType: Prefix
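A possible sketch for the authentication example mentioned above, using the standard ingress-nginx basic-auth annotations and an htpasswd-generated secret (names are arbitrary):

# apt install apache2-utils
# htpasswd -c auth admin
# kubectl -n longhorn-system create secret generic basic-auth --from-file=auth

and in the Ingress metadata:

  annotations:
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: basic-auth
    nginx.ingress.kubernetes.io/auth-realm: "Authentication Required"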
kube1:~# kubectl -n my-keycloak-ns scale --replicas 0 statefulset my-keycloak
kube1:~# kubectl -n my-keycloak-ns scale --replicas 0 statefulset my-keycloak-postgresql

kube1:~# kubectl -n my-keycloak-ns scale --replicas 1 statefulset my-keycloak-postgresql
kube1:~# kubectl -n my-keycloak-ns scale --replicas 2 statefulset my-keycloak

Setting -> General -> Backup Target -> nfs://server.corp13.un:/var/www (the Linux NFS client package is not required)

server# scp /etc/pywebd/* kube1:/tmp/

kube1:~/pywebd-k8s# kubectl create configmap pywebd-conf --from-file=/tmp/pywebd.conf --dry-run=client -o yaml | tee my-webd-configmap.yaml
kube1:~/pywebd-k8s# cat my-webd-configmap.yaml
apiVersion: v1
data:
  pywebd.conf: |
    [default]
    DocumentRoot = /usr/local/apache2/htdocs
    Listen = 4443
kind: ConfigMap
metadata:
  creationTimestamp: null
  name: pywebd-conf
kube1:~/pywebd-k8s# kubectl apply -f my-webd-configmap.yaml -n my-ns
kube1:~/pywebd-k8s# kubectl -n my-ns get configmaps

kube1:~/pywebd-k8s# kubectl create secret tls pywebd-tls --key /tmp/pywebd.key --cert /tmp/pywebd.crt --dry-run=client -o yaml | tee my-webd-secret-tls.yaml
kube1:~/pywebd-k8s# less my-webd-secret-tls.yaml

apiVersion: v1
data:
  tls.crt: ...
  tls.key: ...
kind: Secret
metadata:
  creationTimestamp: null
  name: pywebd-tls
type: kubernetes.io/tls

kube1:~/pywebd-k8s# rm -rv /tmp/pywebd.*
kube1:~/pywebd-k8s# kubectl apply -f my-webd-secret-tls.yaml -n my-ns
kube1:~/pywebd-k8s# kubectl -n my-ns get secrets

kube1:~/pywebd-k8s# kubectl create secret docker-registry regcred --docker-server=server.corpX.un:5000 --docker-username=student --docker-password='strongpassword' -n my-ns

kube1:~/pywebd-k8s# cat my-webd-deployment.yaml
...
      imagePullSecrets:
      - name: regcred
      containers:
      - name: my-webd
        image: server.corpX.un:5000/student/pywebd:ver1.2
        imagePullPolicy: "Always"
#        env:
#          ...
...
        livenessProbe:
          httpGet:
            port: 4443
            scheme: HTTPS
...
        volumeMounts:
...
        - name: conf-volume
          subPath: pywebd.conf
          mountPath: /etc/pywebd/pywebd.conf
        - name: secret-tls-volume
          subPath: tls.crt
          mountPath: /etc/pywebd/pywebd.crt
        - name: secret-tls-volume
          subPath: tls.key
          mountPath: /etc/pywebd/pywebd.key
...
      volumes:
...
      - name: conf-volume
        configMap:
          name: pywebd-conf
      - name: secret-tls-volume
        secret:
          secretName: pywebd-tls
...
kubeN$ curl --connect-to "":"":<POD_IP>:4443 https://pywebd.corpX.un
root@node1:~# cat sshd_config
PermitRootLogin yes
PasswordAuthentication no
ChallengeResponseAuthentication no
UsePAM no

root@node1:~# kubectl create configmap ssh-config --from-file=sshd_config --dry-run=client -o yaml
...
server:~# cat .ssh/id_rsa.pub
...
root@node1:~# cat my-openssh-server-deployment.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ssh-config
data:
  sshd_config: |
    PermitRootLogin yes
    PasswordAuthentication no
    ChallengeResponseAuthentication no
    UsePAM no
  authorized_keys: |
    ssh-rsa AAAAB.....C0zOcZ68= root@server.corpX.un
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-openssh-server
spec:
  selector:
    matchLabels:
      app: my-openssh-server
  template:
    metadata:
      labels:
        app: my-openssh-server
    spec:
      containers:
      - name: my-openssh-server
        image: linuxserver/openssh-server
        command: ["/bin/sh"]
        args: ["-c", "/usr/bin/ssh-keygen -A; usermod -p '*' root; /usr/sbin/sshd.pam -D"]
        ports:
        - containerPort: 22
        volumeMounts:
        - name: ssh-volume
          subPath: sshd_config
          mountPath: /etc/ssh/sshd_config
        - name: ssh-volume
          subPath: authorized_keys
          mountPath: /root/.ssh/authorized_keys
      volumes:
      - name: ssh-volume
        configMap:
          name: ssh-config
---
apiVersion: v1
kind: Service
metadata:
  name: my-openssh-server
spec:
  type: NodePort
  ports:
  - port: 22
    nodePort: 32222
  selector:
    app: my-openssh-server
root@node1:~# kubectl apply -f my-openssh-server-deployment.yaml
root@node1:~# iptables-save | grep 32222
root@node1:~# ###kubectl exec -ti my-openssh-server-NNNNNNNN-NNNNN -- bash

server:~# ssh -p 32222 nodeN
Welcome to OpenSSH Server

my-openssh-server-NNNNNNNN-NNNNN:~# nslookup my-openssh-server.default.svc.cluster.local

gitlab-runner@gate:~/webd$ cat my-webd-ssh-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-webd-ssh
  namespace: my-ns
spec:
  selector:
    matchLabels:
      app: my-webd-ssh
  replicas: 1
  template:
    metadata:
      labels:
        app: my-webd-ssh
    spec:
      containers:
      - name: my-webd
        image: server.corpX.un:5000/student/webd:latest
        volumeMounts:
        - name: html
          mountPath: /var/www
      - name: my-ssh
        image: atmoz/sftp
        args: ["user3:password3:10003"]
        volumeMounts:
        - name: html
          mountPath: /home/user3/www
      volumes:
      - name: html
        emptyDir: {}
...

$ kubectl describe pod my-webd-NNNNNNNNNN-NNNNN -n my-ns
$ kubectl exec -ti -n my-ns my-webd-NNNNNNNNNN-NNNNN -c my-ssh -- bash

$ ### kubectl expose deployment my-webd-ssh --type=NodePort --port=80,22 -n my-ns

$ cat my-webd-ssh-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-webd-ssh
  namespace: my-ns
spec:
  type: NodePort
  selector:
    app: my-webd-ssh
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22
# wget https://get.helm.sh/helm-v3.16.4-linux-amd64.tar.gz
# tar -zxvf helm-*-linux-amd64.tar.gz
# mv linux-amd64/helm /usr/local/bin/helm

$ cat ~/.profile

...
source <(helm completion bash)

$ helm upgrade ingress-nginx --install ingress-nginx \
  --set controller.hostNetwork=true,controller.publishService.enabled=false,controller.kind=DaemonSet,controller.config.use-forwarded-headers=true \
  --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace

$ helm list --namespace ingress-nginx
$ helm list -A
$ kubectl get all -n ingress-nginx -o wide

$ helm delete ingress-nginx --namespace ingress-nginx

$ mkdir -p ingress-nginx; cd ingress-nginx

$ helm template ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx | tee t1.yaml
$ helm show values ingress-nginx --repo https://kubernetes.github.io/ingress-nginx | tee values.yaml.orig

$ cat values.yaml
controller:
  hostNetwork: true
  publishService:
    enabled: false
  kind: DaemonSet
#  config:
#    use-forwarded-headers: true
#    allow-snippet-annotations: true
#  service:
#    type: LoadBalancer
#    loadBalancerIP: "192.168.X.64"
$ helm template ingress-nginx -f values.yaml --repo https://kubernetes.github.io/ingress-nginx -n ingress-nginx | tee t2.yaml
$ helm upgrade ingress-nginx -i ingress-nginx -f values.yaml --repo https://kubernetes.github.io/ingress-nginx -n ingress-nginx --create-namespace

$ kubectl exec -n ingress-nginx pods/ingress-nginx-controller-<TAB> -- cat /etc/nginx/nginx.conf | tee nginx.conf | grep use_forwarded_headers

$ kubectl -n ingress-nginx describe service/ingress-nginx-controller
...
Endpoints:                192.168.X.221:80,192.168.X.222:80,192.168.X.223:80
...

# kubectl get clusterrole -A | grep -i ingress
# kubectl get clusterrolebindings -A | grep -i ingress
# kubectl get validatingwebhookconfigurations -A | grep -i ingress

# ###helm uninstall ingress-nginx -n ingress-nginx

~/gowebd-k8s$ helm create webd-chart

$ less webd-chart/templates/deployment.yaml

$ cat webd-chart/Chart.yaml

...
description: A Helm chart WebD for Kubernetes
...
version: 0.1.1
icon: https://val.bmstu.ru/unix/Media/logo.gif
...
appVersion: "latest"
#appVersion: ver1.7   #for vanilla argocd
$ cat webd-chart/values.yaml
...
replicaCount: 2
image:
  repository: server.corpX.un:5000/student/webd
  pullPolicy: Always
...
serviceAccount:
  create: false
...
service:
#  type: NodePort
...
ingress:
  enabled: true
  className: "nginx"
...
  hosts:
    - host: webd.corpX.un
...
#  tls: []
#  tls:
#    - secretName: gowebd-tls
#      hosts:
#        - gowebd.corpX.un
...
#APWEBD_HOSTNAME: "apwebd.corp13.un"
#KEYCLOAK_HOSTNAME: "keycloak.corp13.un"
#REALM_NAME: "corp13"
$ less webd-chart/templates/deployment.yaml
...
          imagePullPolicy: {{ .Values.image.pullPolicy }}
#          env:
#          - name: APWEBD_HOSTNAME
#            value: "{{ .Values.APWEBD_HOSTNAME }}"
#          - name: KEYCLOAK_HOSTNAME
#            value: "{{ .Values.KEYCLOAK_HOSTNAME }}"
#          - name: REALM_NAME
#            value: "{{ .Values.REALM_NAME }}"
...
$ helm lint webd-chart/
$ helm template my-webd webd-chart/ | less

$ helm install my-webd webd-chart/ -n my-ns --create-namespace --wait

$ curl kubeN -H "Host: gowebd.corpX.un"

$ kubectl describe events -n my-ns | less

$ export HELM_NAMESPACE=my-ns
$ helm list

$ ### helm upgrade my-webd webd-chart/ --set=image.tag=ver1.10
$ helm history my-webd
$ helm rollback my-webd 1

$ helm uninstall my-webd

~/gowebd-k8s$ helm repo add --username student --password NNNNN-NNNNNNNNNNNNNNNNNNN webd https://server.corpX.un/api/v4/projects/N/packages/helm/stable
"webd" has been added to your repositories
~/gowebd-k8s$ helm repo list

~/gowebd-k8s$ helm package webd-chart
~/gowebd-k8s$ tar -tf webd-chart-0.1.1.tgz

~/gowebd-k8s$ helm plugin install https://github.com/chartmuseum/helm-push
~/gowebd-k8s$ helm cm-push webd-chart-0.1.1.tgz webd
~/gowebd-k8s$ rm webd-chart-0.1.1.tgz

~/gowebd-k8s$ ### helm repo remove webd
~/gowebd-k8s$ ### helm plugin uninstall cm-push

kube1:~# helm repo add webd https://server.corpX.un/api/v4/projects/N/packages/helm/stable
kube1:~# helm repo update
kube1:~# helm search repo webd
kube1:~# helm repo update webd
kube1:~# helm show values webd/webd-chart | tee values.yaml.orig
kube1:~# ###helm pull webd/webd-chart
kube1:~# helm install my-webd webd/webd-chart
kube1:~# ###helm uninstall my-webd
kube1:~# ###helm repo remove webd

kube1:~# mkdir gowebd; cd gowebd
kube1:~/gowebd# ###helm pull webd-chart --repo https://server.corpX.un/api/v4/projects/N/packages/helm/stable
kube1:~/gowebd# helm show values webd-chart --repo https://server.corpX.un/api/v4/projects/N/packages/helm/stable | tee values.yaml.orig
kube1:~/gowebd# cat values.yaml

replicaCount: 3
image:
  tag: "ver1.1"
#REALM_NAME: "corp"

kube1:~/gowebd# helm upgrade my-webd -i webd-chart -f values.yaml -n my-ns --create-namespace --repo https://server.corpX.un/api/v4/projects/N/packages/helm/stable

$ curl http://kubeN -H "Host: gowebd.corpX.un"

kube1:~/gowebd# ###helm uninstall my-webd -n my-ns

kube1:~/gitlab-runner# kubectl create ns gitlab-runner
kube1:~/gitlab-runner# kubectl -n gitlab-runner create configmap ca-crt --from-file=/usr/local/share/ca-certificates/ca.crt

kube1:~/gitlab-runner# helm repo add gitlab https://charts.gitlab.io
kube1:~/gitlab-runner# helm repo list
kube1:~/gitlab-runner# helm search repo -l gitlab
kube1:~/gitlab-runner# helm search repo -l gitlab/gitlab-runner

kube1:~/gitlab-runner# helm show values gitlab/gitlab-runner --version 0.70.5 | tee values.yaml
kube1:~/gitlab-runner# cat values.yaml
...
gitlabUrl: https://server.corpX.un
...
runnerToken: "NNNNNNNNNNNNNNNNNNNNN"
...
rbac:
...
  create: true                      #change this
...
serviceAccount:
...
  create: true                      #change this
...
runners:
...
  config: |
    [[runners]]
      tls-ca-file = "/mnt/ca.crt"   #insert this
      [runners.kubernetes]
        namespace = "{{.Release.Namespace}}"
        image = "alpine"
        privileged = true           #insert this
...
securityContext:
  allowPrivilegeEscalation: true    #change this
  readOnlyRootFilesystem: false
  runAsNonRoot: true
  privileged: true                  #change this
...
#volumeMounts: []                   #comment this
volumeMounts:
  - name: ca-crt
    subPath: ca.crt
    mountPath: /mnt/ca.crt
...
#volumes: []                        #comment this
volumes:
  - name: ca-crt
    configMap:
      name: ca-crt
...
kube1:~/gitlab-runner# helm upgrade -i gitlab-runner gitlab/gitlab-runner -f values.yaml -n gitlab-runner --version 0.70.5
kube1:~/gitlab-runner# kubectl get all -n gitlab-runner
kube1:~/gitlab-runner# ### helm -n gitlab-runner uninstall gitlab-runner

gitlab-runner@server:~$ helm repo add gitlab https://charts.gitlab.io
gitlab-runner@server:~$ helm repo list
gitlab-runner@server:~$ helm search repo -l gitlab
gitlab-runner@server:~$ helm search repo -l gitlab/gitlab-runner

gitlab-runner@server:~$ helm show values gitlab/gitlab-runner --version 0.56.0 | tee values.yaml
gitlab-runner@server:~$ diff values.yaml values.yaml.orig

...
gitlabUrl: http://server.corpX.un/
...
runnerRegistrationToken: "NNNNNNNNNNNNNNNNNNNNNNNN"
...
148,149c142
< create: true
---
> create: false
325d317
< privileged = true
432c424
< allowPrivilegeEscalation: true
---
> allowPrivilegeEscalation: false
435c427
< privileged: true
---
> privileged: false

gitlab-runner@server:~$ helm upgrade -i gitlab-runner gitlab/gitlab-runner -f values.yaml -n gitlab-runner --create-namespace --version 0.56.0
gitlab-runner@server:~$ kubectl get all -n gitlab-runner

# kubectl -n gitlab-runner create configmap wild-crt --from-file=wild.crt

# cat values.yaml
...
gitlabUrl: https://server.corpX.un/
...
  config: |
    [[runners]]
      tls-ca-file = "/mnt/wild.crt"
      [runners.kubernetes]      
...
#volumeMounts: []
volumeMounts:
  - name: wild-crt
    subPath: wild.crt
    mountPath: /mnt/wild.crt
    
#volumes: []
volumes:
  - name: wild-crt
    configMap:
      name: wild-crt
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml

$ cat dashboard-user-role.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
$ kubectl apply -f dashboard-user-role.yaml
$ kubectl -n kubernetes-dashboard create token admin-user
$ kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d ; echo
cmder$ kubectl proxy
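With the proxy running, the dashboard installed from recommended.yaml should be reachable at the standard proxy path (assuming the default service name and namespace):

http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/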
kube1:~/metrics-server# curl -L https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.7.2/components.yaml | tee metrics-server-components.yaml
kube1:~/metrics-server# cat metrics-server-components.yaml
...
      containers:
      - args:
        - --cert-dir=/tmp
        - --kubelet-insecure-tls   # add this
...
kube1:~/metrics-server# kubectl apply -f metrics-server-components.yaml

kube1# kubectl get pods -A | grep metrics-server
kube1# kubectl top pod #-n kube-system
kube1# kubectl top pod -A --sort-by=memory
kube1# kubectl top node

kube1# helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
kube1# helm repo update
kube1# helm install kube-state-metrics prometheus-community/kube-state-metrics -n vm --create-namespace
kube1# curl kube-state-metrics.vm.svc.cluster.local:8080/metrics

kubeN:~# more /etc/kubernetes/manifests/kube-apiserver.yaml

kubeN:~# etcdctl member list -w table \
  --endpoints=https://kube1:2379 \
  --cacert=/etc/ssl/etcd/ssl/ca.pem \
  --cert=/etc/ssl/etcd/ssl/node-kube1.pem \
  --key=/etc/ssl/etcd/ssl/node-kube1-key.pem

kubeN:~# etcdctl endpoint status -w table \
  --endpoints=https://kube1:2379,https://kube2:2379,https://kube3:2379 \
  --cacert=/etc/ssl/etcd/ssl/ca.pem \
  --cert=/etc/ssl/etcd/ssl/node-kube1.pem \
  --key=/etc/ssl/etcd/ssl/node-kube1-key.pem
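The same certificate flags can be reused to take an etcd snapshot (a sketch; the output path is arbitrary):

kube1:~# etcdctl snapshot save /root/etcd-snapshot.db \
  --endpoints=https://kube1:2379 \
  --cacert=/etc/ssl/etcd/ssl/ca.pem \
  --cert=/etc/ssl/etcd/ssl/node-kube1.pem \
  --key=/etc/ssl/etcd/ssl/node-kube1-key.pem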
~/kubespray# cat inventory/mycluster/group_vars/all/docker.yml
...
docker_registry_mirrors:
  - https://mirror.gcr.io
...
~/kubespray# cat inventory/mycluster/group_vars/all/containerd.yml
...
containerd_registries_mirrors:
  - prefix: docker.io
    mirrors:
    - host: https://mirror.gcr.io
      capabilities: ["pull", "resolve"]
      skip_verify: false
...
mkdir /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
apt update && apt install -y kubeadm=1.28.1-1.1 kubelet=1.28.1-1.1 kubectl=1.28.1-1.1
cat ~/.kube/config | yq -r '.clusters[0].cluster."certificate-authority-data"' | base64 -d - > ~/.kube/ca.pem 
cat ~/.kube/config | yq -r '.users[0].user."client-certificate-data"' | base64 -d - > ~/.kube/user.pem
cat ~/.kube/config | yq -r '.users[0].user."client-key-data"' | base64 -d - > ~/.kube/user-key.pem
SERVER_URL=$(cat ~/.kube/config | yq -r .clusters[0].cluster.server)
curl --cacert ~/.kube/ca.pem --cert ~/.kube/user.pem --key ~/.kube/user-key.pem -X GET  ${SERVER_URL}/api/v1/namespaces/default/pods/
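The same API call can be made with a bearer token instead of the client certificate pair (a sketch, reusing the admin-user ServiceAccount created for the dashboard above):

TOKEN=$(kubectl -n kubernetes-dashboard create token admin-user)
curl --cacert ~/.kube/ca.pem -H "Authorization: Bearer ${TOKEN}" -X GET ${SERVER_URL}/api/v1/namespaces/default/pods/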
student@node2:~$ sudo apt install conntrack

https://computingforgeeks.com/install-mirantis-cri-dockerd-as-docker-engine-shim-for-kubernetes/
...
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.2/crictl-v1.24.2-linux-amd64.tar.gz
...

student@node2:~$ minikube start --driver=none --insecure-registry "server.corpX.un:5000"

student@node1:~$ minikube dashboard &
...
Opening http://127.0.0.1:NNNNN/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser
...

/home/mobaxterm> ssh -L NNNNN:localhost:NNNNN student@192.168.X.10

Now the same link works on the Windows host system.

student@node1:~$ tar -cvzf kube-config.tar.gz .kube/config .minikube/ca.crt .minikube/profiles/minikube

gitlab-runner@server:~$ scp student@node1:kube-config.tar.gz .
gitlab-runner@server:~$ tar -xvf kube-config.tar.gz
gitlab-runner@server:~$ cat .kube/config
...
    certificate-authority: /home/gitlab-runner/.minikube/ca.crt
...
    client-certificate: /home/gitlab-runner/.minikube/profiles/minikube/client.crt
    client-key: /home/gitlab-runner/.minikube/profiles/minikube/client.key
...
root@gate:~# curl -L https://github.com/kubernetes/kompose/releases/download/v1.26.0/kompose-linux-amd64 -o kompose
root@gate:~# chmod +x kompose
root@gate:~# sudo mv ./kompose /usr/local/bin/kompose

gitlab-runner@gate:~/webd$ kompose convert
gitlab-runner@gate:~/webd$ ls *yaml
gitlab-runner@gate:~/webd$ kubectl apply -f sftp-deployment.yaml,vol1-persistentvolumeclaim.yaml,webd-service.yaml,sftp-service.yaml,webd-deployment.yaml
gitlab-runner@gate:~/webd$ kubectl get all