# curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
# chmod +x kubectl
# mv kubectl /usr/local/bin/
cmder$ curl -LO "https://dl.k8s.io/release/v1.29.0/bin/windows/amd64/kubectl.exe"
cmder$ mv kubectl.exe /usr/bin
mkdir ~/.kube/
scp root@192.168.X.2N1:.kube/config ~/.kube/
cat ~/.kube/config
...
server: https://192.168.X.2N1:6443
...
kubectl get all -o wide --all-namespaces
kubectl get all -o wide -A
gitlab-runner@server:~$ source <(kubectl completion bash)
gitlab-runner@server:~$ scp root@kube1:.kube/config .kube/config_kube1
gitlab-runner@server:~$ cat .kube/config_kube1
...
.kube/config_kube1
...
gitlab-runner@server:~$ export KUBECONFIG=~/.kube/config_kube1
gitlab-runner@server:~$ kubectl get nodes
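kubectl can also merge several config files when KUBECONFIG holds a colon-separated list (standard kubectl behavior; the context name is a placeholder, take real names from the get-contexts output):

gitlab-runner@server:~$ export KUBECONFIG=~/.kube/config:~/.kube/config_kube1
gitlab-runner@server:~$ kubectl config get-contexts
gitlab-runner@server:~$ kubectl config use-context <context-name>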
root@server:~# apt install -y curl wget apt-transport-https
root@server:~# wget https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
root@server:~# mv minikube-linux-amd64 /usr/local/bin/minikube
root@server:~# chmod +x /usr/local/bin/minikube
gitlab-runner@server:~$ ### minikube delete
gitlab-runner@server:~$ ### rm -rv .minikube/
gitlab-runner@server:~$ time minikube start --driver=docker --insecure-registry "server.corpX.un:5000"
real 29m8.320s
...
gitlab-runner@server:~$ minikube status
gitlab-runner@server:~$ minikube ip
gitlab-runner@server:~$ minikube addons list
gitlab-runner@server:~$ minikube addons configure registry-creds   # not needed for a registry of public projects
...
Do you want to enable Docker Registry? [y/n]: y
-- Enter docker registry server url: http://server.corpX.un:5000
-- Enter docker registry username: student
-- Enter docker registry password:
...
gitlab-runner@server:~$ minikube addons enable registry-creds
gitlab-runner@server:~$ minikube kubectl -- get pods -A
gitlab-runner@server:~$ alias kubectl='minikube kubectl --'
gitlab-runner@server:~$ kubectl get pods -A
or
gitlab-runner@server:~$ ###minikube stop
gitlab-runner@server:~$ ###minikube start
node1# ssh-keygen
node1# ssh-copy-id node2
node1# ssh-copy-id node3
node1# bash -c '
swapoff -a
ssh node2 swapoff -a
ssh node3 swapoff -a
'
node1# bash -c '
sed -i"" -e "/swap/s/^/#/" /etc/fstab
ssh node2 sed -i"" -e "/swap/s/^/#/" /etc/fstab
ssh node3 sed -i"" -e "/swap/s/^/#/" /etc/fstab
'
node1# bash -c '
http_proxy=http://proxy.isp.un:3128/ apt -y install apt-transport-https curl
ssh node2 http_proxy=http://proxy.isp.un:3128/ apt -y install apt-transport-https curl
ssh node3 http_proxy=http://proxy.isp.un:3128/ apt -y install apt-transport-https curl
'
node1# bash -c '
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add
ssh node2 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add"
ssh node3 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add"
'
node1# bash -c '
apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main"
ssh node2 apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\"
ssh node3 apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\"
'
node1# bash -c '
http_proxy=http://proxy.isp.un:3128/ apt -y install kubeadm kubelet kubectl kubernetes-cni
ssh node2 http_proxy=http://proxy.isp.un:3128/ apt -y install kubeadm kubelet kubectl kubernetes-cni
ssh node3 http_proxy=http://proxy.isp.un:3128/ apt -y install kubeadm kubelet kubectl kubernetes-cni
'

https://forum.linuxfoundation.org/discussion/864693/the-repository-http-apt-kubernetes-io-kubernetes-xenial-release-does-not-have-a-release-file

!!!! Attention: do the following on every node: !!!!
remove the line with kubernetes from /etc/apt/sources.list, then:

mkdir /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
apt update
apt install -y kubeadm=1.28.1-1.1 kubelet=1.28.1-1.1 kubectl=1.28.1-1.1
root@node1:~# kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.X.201
root@node1:~# mkdir -p $HOME/.kube
root@node1:~# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
root@node1:~# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
root@node1:~# kubectl get pod -o wide --all-namespaces
root@node1:~# kubectl get --raw='/readyz?verbose'
node1# bash -c '
rm /etc/containerd/config.toml
systemctl restart containerd
ssh node2 rm /etc/containerd/config.toml
ssh node2 systemctl restart containerd
ssh node3 rm /etc/containerd/config.toml
ssh node3 systemctl restart containerd
'
root@node2_3:~# curl -k https://node1:6443/livez?verbose
root@node2_3:~# kubeadm join 192.168.X.201:6443 --token NNNNNNNNNNNNNNNNNNNN \
--discovery-token-ca-cert-hash sha256:NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN
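If the token printed by kubeadm init has expired (tokens are valid for 24 hours by default), a fresh join command can be generated on the control-plane node:

root@node1:~# kubeadm token create --print-join-command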
root@node1:~# kubectl cluster-info
root@node1:~# kubectl get nodes -o wide
$ kubectl cordon kube3
$ time kubectl drain kube3 --force --ignore-daemonsets --delete-emptydir-data
$ kubectl delete node kube3
node1# bash -c '
kubeadm reset
ssh node2 kubeadm reset
ssh node3 kubeadm reset
'
root@node1:~# mkdir /etc/containerd/
root@node1:~# cat /etc/containerd/config.toml
version = 2
[plugins."io.containerd.grpc.v1.cri".registry]
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."server.corpX.un:5000"]
      endpoint = ["http://server.corpX.un:5000"]

# no need
#    [plugins."io.containerd.grpc.v1.cri".registry.configs]
#      [plugins."io.containerd.grpc.v1.cri".registry.configs."server.corpX.un:5000".tls]
#        insecure_skip_verify = true

# doesn't work in cri-tools 1.25, requires a public project
#    [plugins."io.containerd.grpc.v1.cri".registry.configs."server.corpX.un:5000".auth]
#      auth = "c3R1ZGVudDpwYXNzd29yZA=="
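The commented auth value is the base64 encoding of the username:password pair; it can be reproduced like this (student:password is the pair used throughout this course):

# echo -n 'student:password' | base64
c3R1ZGVudDpwYXNzd29yZA==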
node1# bash -c '
ssh node2 mkdir /etc/containerd/
ssh node3 mkdir /etc/containerd/
scp /etc/containerd/config.toml node2:/etc/containerd/config.toml
scp /etc/containerd/config.toml node3:/etc/containerd/config.toml
systemctl restart containerd
ssh node2 systemctl restart containerd
ssh node3 systemctl restart containerd
'
root@nodeN:~# containerd config dump | less
Verification:
root@nodeN:~# crictl -r unix:///run/containerd/containerd.sock pull server.corpX.un:5000/student/gowebd
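For a private project the same pull can be tested with explicit credentials (crictl pull supports --creds; the password here is the course's example value):

root@nodeN:~# crictl -r unix:///run/containerd/containerd.sock pull --creds student:password server.corpX.un:5000/student/gowebd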
kube1# ssh-keygen
kube1# ssh-copy-id kube1;ssh-copy-id kube2;ssh-copy-id kube3;ssh-copy-id kube4;
kube1# apt update
kube1# apt install python3-pip -y
kube1# git clone https://github.com/kubernetes-sigs/kubespray
kube1# cd kubespray/
~/kubespray# grep -r containerd_insecure_registries .
~/kubespray# git log
~/kubespray# git branch -r
~/kubespray# ### git checkout origin/release-2.22
~/kubespray# git tag -l
~/kubespray# ### git checkout tags/v2.22.1
~/kubespray# git checkout 4c37399c7582ea2bfb5202c3dde3223f9c43bf59
~/kubespray# ### git checkout master
~/kubespray# time pip3 install -r requirements.txt
real 1m48.202s
~/kubespray# cp -rvfpT inventory/sample inventory/mycluster
~/kubespray# declare -a IPS=(kube1,192.168.X.221 kube2,192.168.X.222 kube3,192.168.X.223)
~/kubespray# CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
~/kubespray# less inventory/mycluster/hosts.yaml
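Before running the playbook it is worth confirming that Ansible can reach every node over SSH (a standard ad-hoc check against the inventory generated above):

~/kubespray# ansible -i inventory/mycluster/hosts.yaml all -m ping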
~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml
real 45m31.796s
kube1# less ~/.kube/config
~/kubespray# ###time ansible-playbook -i inventory/mycluster/hosts.yaml reset.yml
real 7m31.796s
~/kubespray# cat inventory/mycluster/hosts.yaml
...
    node4:
      ansible_host: 192.168.X.204
      ip: 192.168.X.204
      access_ip: 192.168.X.204
...
  kube_node:
...
    node4:
...
~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml --limit=kube4 scale.yml
real 17m37.459s
$ kubectl get nodes -o wide
~/kubespray# cat inventory/mycluster/group_vars/all/containerd.yml
...
containerd_insecure_registries:
  "server.corpX.un:5000": "http://server.corpX.un:5000"

containerd_registry_auth:
  - registry: server.corpX.un:5000
    username: student
    password: Pa$$w0rd
...
~/kubespray# time ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml
user 46m37.151s
# less /etc/containerd/config.toml
~/kubespray# cat inventory/mycluster/group_vars/k8s_cluster/addons.yml
...
helm_enabled: true
...
ingress_nginx_enabled: true
ingress_nginx_host_network: true
...
$ kubectl api-resources
$ kubectl run my-debian --image=debian -- "sleep" "3600"
$ ###kubectl run -ti --rm my-debian --image=debian --overrides='{"spec": { "nodeSelector": {"kubernetes.io/hostname": "kube4"}}}'
$ kubectl get all
kubeN# crictl ps | grep debi
kubeN# crictl images
nodeN# ctr ns ls
nodeN# ctr -n=k8s.io image ls | grep debi
$ kubectl delete pod my-debian
$ kubectl create deployment my-debian --image=debian -- "sleep" "3600"
$ kubectl get deployments
$ kubectl attach my-debian-NNNNNNNNN-NNNNN
$ kubectl exec -ti my-debian-NNNNNNNNN-NNNNN -- bash
Ctrl-D
$ kubectl get deployment my-debian -o yaml
$ kubectl edit deployment my-debian
$ kubectl get pods -o wide
$ kubectl delete deployment my-debian
$ cat my-debian-deployment.yaml
apiVersion: apps/v1
kind: ReplicaSet
#kind: Deployment
metadata:
  name: my-debian
spec:
  selector:
    matchLabels:
      app: my-debian
  replicas: 2
  template:
    metadata:
      labels:
        app: my-debian
    spec:
      containers:
      - name: my-debian
        image: debian
        command: ["/bin/sh"]
        args: ["-c", "while true; do echo hello; sleep 3;done"]
      restartPolicy: Always
$ kubectl apply -f my-debian-deployment.yaml
...
$ kubectl delete -f my-debian-deployment.yaml
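Since the manifest declares a ReplicaSet, the replica count can also be changed imperatively to watch the controller converge (standard kubectl; pod names will differ):

$ kubectl scale replicaset my-debian --replicas=3
$ kubectl get pods -l app=my-debian -o wide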
$ kubectl create namespace my-ns
$ kubectl get namespaces
$ ### kubectl create deployment my-webd --image=server.corpX.un:5000/student/webd:latest --replicas=2 -n my-ns
$ ### kubectl delete deployment my-webd -n my-ns
$ cd webd/
$ cat my-webd-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-webd
spec:
  selector:
    matchLabels:
      app: my-webd
  replicas: 2
  template:
    metadata:
      labels:
        app: my-webd
    spec:
      containers:
      - name: my-webd
#        image: server.corpX.un:5000/student/webd
#        image: server.corpX.un:5000/student/webd:ver1.N
#        imagePullPolicy: "Always"
#        image: httpd
#        lifecycle:
#          postStart:
#            exec:
#              command: ["/bin/sh", "-c", "echo Hello from apache2 on $(hostname) > /usr/local/apache2/htdocs/index.html"]
#        env:
#        - name: APWEBD_HOSTNAME
#          value: "apwebd.corpX.un"
#        - name: KEYCLOAK_HOSTNAME
#          value: "keycloak.corpX.un"
#        - name: REALM_NAME
#          value: "corpX"
#        livenessProbe:
#          httpGet:
#            port: 80
#        volumeMounts:
#        - name: nfs-volume
#          mountPath: /var/www
#      volumes:
#      - name: nfs-volume
#        nfs:
#          server: server.corpX.un
#          path: /var/www
$ kubectl apply -f my-webd-deployment.yaml -n my-ns
$ kubectl get all -n my-ns -o wide
$ kubectl describe -n my-ns pod/my-webd-NNNNNNNNNN-NNNNN
$ kubectl scale deployment my-webd --replicas=3 -n my-ns
$ kubectl delete pod/my-webd-NNNNNNNNNN-NNNNN -n my-ns
$ ### kubectl expose deployment my-webd --type=NodePort --port=80 -n my-ns
$ ### kubectl delete svc my-webd -n my-ns
$ cat my-webd-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-webd
spec:
#  type: NodePort
#  type: LoadBalancer
#  loadBalancerIP: 192.168.X.64
  selector:
    app: my-webd
  ports:
  - protocol: TCP
    port: 80
#    nodePort: 30111
$ kubectl apply -f my-webd-service.yaml -n my-ns
$ kubectl logs -l app=my-webd -n my-ns
(the -f, --tail=2000 and --previous options are available)
$ kubectl get svc my-webd -n my-ns
NAME          TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
my-webd-svc   NodePort   10.102.135.146   <none>        80:NNNNN/TCP   18h
$ kubectl describe svc my-webd -n my-ns
$ curl http://node1,2,3:NNNNN
(works unreliably on a hand-rolled kubeadm cluster)
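To use the assigned NodePort in scripts, it can be extracted with jsonpath (standard kubectl; the index assumes the single port defined above):

$ NODE_PORT=$(kubectl get svc my-webd -n my-ns -o jsonpath='{.spec.ports[0].nodePort}')
$ curl http://node1:$NODE_PORT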
$ minikube service list
$ minikube service my-webd -n my-ns --url
http://192.168.49.2:NNNNN
$ curl $(minikube service my-webd -n my-ns --url)
$ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml
$ kubectl -n metallb-system get all
$ cat first-pool.yaml
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.13.64/28
  autoAssign: false
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: first-pool-advertisement
  namespace: metallb-system
spec:
  ipAddressPools:
  - first-pool
  interfaces:
  - eth0
$ kubectl apply -f first-pool.yaml
$ ### kubectl delete -f first-pool.yaml && rm first-pool.yaml
$ ### kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.3/config/manifests/metallb-native.yaml
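Because the pool is created with autoAssign: false, a LoadBalancer service has to request it explicitly; one way is MetalLB's address-pool annotation (shown here on the my-webd service purely as an illustration):

$ kubectl get ipaddresspools.metallb.io -n metallb-system
$ kubectl annotate svc my-webd -n my-ns metallb.universe.tf/address-pool=first-pool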
kube1# host my-webd.my-ns.svc.cluster.local 169.254.25.10
...10.102.135.146...
server# ssh -p 32222 nodeN
my-openssh-server-NNNNNNNN-NNNNN:~# curl my-webd.my-ns.svc.cluster.local
OR
my-openssh-server-NNNNNNNN-NNNNN:~# curl my-webd-webd-chart.my-ns.svc.cluster.local
node1/kube1# kubectl port-forward -n my-ns --address 0.0.0.0 services/my-webd 1234:80
cmder> kubectl port-forward -n my-ns services/my-webd 1234:80
node1/kube1# kubectl -n my-ns delete pod/my-webd...
kube1:~# kubectl proxy --address='0.0.0.0' --accept-hosts='^*$'
cmder> kubectl proxy
$ kubectl get all -n my-ns
$ kubectl delete -n my-ns -f my-webd-deployment.yaml,my-webd-service.yaml
or
$ kubectl delete namespace my-ns
server# cat /etc/bind/corpX.un
...
webd A 192.168.49.2
gitlab-runner@server:~$ minikube addons enable ingress
server# cat /etc/bind/corpX.un
...
webd A 192.168.X.202
A 192.168.X.203
gowebd CNAME webd
node1# curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.3.1/deploy/static/provider/baremetal/deploy.yaml | tee ingress-nginx.controller-v1.3.1.baremetal.yaml
node1# cat ingress-nginx.controller-v1.3.1.baremetal.yaml
...
kind: Deployment
...
spec:
...
replicas: 3 ### insert this (equal to the number of worker nodes)
template:
...
terminationGracePeriodSeconds: 300
hostNetwork: true ### insert this
volumes:
...
node1# kubectl apply -f ingress-nginx.controller-v1.3.1.baremetal.yaml
node1# kubectl get all -n ingress-nginx
node1# ### kubectl delete -f ingress-nginx.controller-v1.3.1.baremetal.yaml
master-1:~$ kubectl exec -n ingress-nginx pods/ingress-nginx-controller-<TAB> -- cat /etc/nginx/nginx.conf | tee nginx.conf
master-1:~$ kubectl edit -n ingress-nginx configmaps ingress-nginx-controller
...
data:
  use-forwarded-headers: "true"
...
node1# diff ingress-nginx.controller-v1.8.2.baremetal.yaml.orig ingress-nginx.controller-v1.8.2.baremetal.yaml
323a324
> use-forwarded-headers: "true"
391c392,393
< kind: Deployment
---
> #kind: Deployment
> kind: DaemonSet
409,412c411,414
< strategy:
< rollingUpdate:
< maxUnavailable: 1
< type: RollingUpdate
---
> # strategy:
> # rollingUpdate:
> # maxUnavailable: 1
> # type: RollingUpdate
501a504
> hostNetwork: true
node1# kubectl -n ingress-nginx describe service/ingress-nginx-controller
...
Endpoints: 192.168.X.221:80,192.168.X.222:80,192.168.X.223:80
...
node1# ### kubectl create ingress my-ingress --class=nginx --rule="webd.corpX.un/*=my-webd:80" -n my-ns
node1# cat my-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-ingress
spec:
  ingressClassName: nginx
#  tls:
#  - hosts:
#    - gowebd.corpX.un
#    secretName: gowebd-tls
  rules:
  - host: webd.corpX.un
    http:
      paths:
      - backend:
          service:
            name: my-webd
            port:
              number: 80
        path: /
        pathType: Prefix
  - host: gowebd.corpX.un
    http:
      paths:
      - backend:
          service:
            name: my-gowebd
            port:
              number: 80
        path: /
        pathType: Prefix
node1# kubectl apply -f my-ingress.yaml -n my-ns
node1# kubectl get ingress -n my-ns
NAME      CLASS   HOSTS                           ADDRESS                       PORTS   AGE
my-webd   nginx   webd.corpX.un,gowebd.corpX.un   192.168.X.202,192.168.X.203   80      14m
$ curl webd.corpX.un
$ curl gowebd.corpX.un
$ curl https://gowebd.corpX.un #-kv
$ curl http://nodeN/ -H "Host: webd.corpX.un"
$ curl --connect-to "":"":kubeN:443 https://gowebd.corpX.un #-vk
$ kubectl logs -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx -f
node1# ### kubectl delete ingress my-ingress -n my-ns
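The gowebd.key/gowebd.crt pair used for the gowebd-tls secret below is not generated in this section; a self-signed pair could be produced like this (a sketch; the CN must match the ingress host):

$ openssl req -x509 -newkey rsa:2048 -nodes -days 365 -keyout gowebd.key -out gowebd.crt -subj "/CN=gowebd.corpX.un"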
$ kubectl create secret tls gowebd-tls --key gowebd.key --cert gowebd.crt -n my-ns
$ kubectl get secrets -n my-ns
$ kubectl get secret/gowebd-tls -o yaml -n my-ns
$ ###kubectl delete secret/gowebd-tls -n my-ns
root@node1:~# ssh node2 mkdir /disk2
root@node1:~# ssh node2 touch /disk2/disk2_node2
root@node1:~# kubectl label nodes node2 disk2=yes
root@node1:~# kubectl get nodes --show-labels
root@node1:~# ###kubectl label nodes node2 disk2-
root@node1:~# cat my-debian-deployment.yaml
...
args: ["-c", "while true; do echo hello; sleep 3;done"]
volumeMounts:
- name: my-disk2-volume
mountPath: /data
# volumeMounts:
# - name: data
# mountPath: /data
volumes:
- name: my-disk2-volume
hostPath:
path: /disk2/
nodeSelector:
disk2: "yes"
# volumes:
# - name: data
# persistentVolumeClaim:
# claimName: my-ha-pvc-sz64m
restartPolicy: Always
root@node1:~# kubectl apply -f my-debian-deployment.yaml
root@node1:~# kubectl get all -o wide
root@node1:~# cat my-ha-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-pv-node2-sz-128m-num-001
#  name: my-pv-kube3-keycloak
  labels:
    type: local
spec:
## comment out storageClassName for keycloak
  storageClassName: my-ha-sc
  capacity:
    storage: 128Mi
#    storage: 8Gi
#  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
#    - ReadWriteOnce
  hostPath:
    path: /disk2
  persistentVolumeReclaimPolicy: Retain
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node2
#          - kube3
root@node1:~# kubectl apply -f my-ha-pv.yaml
root@node1:~# kubectl get persistentvolume
or
root@node1:~# kubectl get pv
root@kube1:~# ###ssh kube3 'mkdir /disk2/; chmod 777 /disk2/'
...
root@node1:~# ###kubectl delete pv my-pv-<TAB>
root@node1:~# cat my-ha-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-ha-pvc-sz64m
spec:
  storageClassName: my-ha-sc
#  storageClassName: local-path
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 64Mi
root@node1:~# kubectl apply -f my-ha-pvc.yaml
root@node1:~# kubectl get persistentvolumeclaims
or
root@node1:~# kubectl get pvc
...
root@node1:~# ### kubectl delete pvc my-ha-pvc-sz64m
$ kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.26/deploy/local-path-storage.yaml
$ kubectl get sc
$ kubectl -n local-path-storage get all
$ curl https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.26/deploy/local-path-storage.yaml | less
/DEFAULT_PATH_FOR_NON_LISTED_NODES
ssh root@kube1 'mkdir /opt/local-path-provisioner'
ssh root@kube2 'mkdir /opt/local-path-provisioner'
ssh root@kube3 'mkdir /opt/local-path-provisioner'
ssh root@kube1 'chmod 777 /opt/local-path-provisioner'
ssh root@kube2 'chmod 777 /opt/local-path-provisioner'
ssh root@kube3 'chmod 777 /opt/local-path-provisioner'
$ kubectl get pvc -n my-keycloak-ns
$ kubectl get pv
$ ###kubectl -n my-keycloak-ns delete pvc data-my-keycloak-postgresql-0
kubeN:~# apt install open-iscsi
$ kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.6.0/deploy/longhorn.yaml

Setting -> General -> Pod Deletion Policy When Node is Down: delete-statefulset-pod
Connecting via kubectl proxy
cmder> kubectl proxy
Connecting via ingress
!!! TODO: add an example with authentication !!!
student@server:~/longhorn$ cat ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: longhorn-ingress
  namespace: longhorn-system
spec:
  ingressClassName: nginx
  rules:
  - host: lh.corp13.un
    http:
      paths:
      - backend:
          service:
            name: longhorn-frontend
            port:
              number: 80
        path: /
        pathType: Prefix
kube1:~# kubectl -n my-keycloak-ns scale --replicas 0 statefulset my-keycloak
kube1:~# kubectl -n my-keycloak-ns scale --replicas 0 statefulset my-keycloak-postgresql
kube1:~# kubectl -n my-keycloak-ns scale --replicas 1 statefulset my-keycloak-postgresql
kube1:~# kubectl -n my-keycloak-ns scale --replicas 2 statefulset my-keycloak
Setting -> General -> Backup Target -> nfs://server.corp13.un:/var/www (the Linux NFS client is not required)
root@node1:~# cat sshd_config
PermitRootLogin yes
PasswordAuthentication no
ChallengeResponseAuthentication no
UsePAM no
root@node1:~# kubectl create configmap ssh-config --from-file=sshd_config --dry-run=client -o yaml
...
server:~# cat .ssh/id_rsa.pub
...
root@node1:~# cat my-openssh-server-deployment.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ssh-config
data:
  sshd_config: |
    PermitRootLogin yes
    PasswordAuthentication no
    ChallengeResponseAuthentication no
    UsePAM no
  authorized_keys: |
    ssh-rsa AAAAB.....C0zOcZ68= root@server.corpX.un
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-openssh-server
spec:
  selector:
    matchLabels:
      app: my-openssh-server
  template:
    metadata:
      labels:
        app: my-openssh-server
    spec:
      containers:
      - name: my-openssh-server
        image: linuxserver/openssh-server
        command: ["/bin/sh"]
        args: ["-c", "/usr/bin/ssh-keygen -A; usermod -p '*' root; /usr/sbin/sshd.pam -D"]
        ports:
        - containerPort: 22
        volumeMounts:
        - name: ssh-volume
          subPath: sshd_config
          mountPath: /etc/ssh/sshd_config
        - name: ssh-volume
          subPath: authorized_keys
          mountPath: /root/.ssh/authorized_keys
      volumes:
      - name: ssh-volume
        configMap:
          name: ssh-config
---
apiVersion: v1
kind: Service
metadata:
  name: my-openssh-server
spec:
  type: NodePort
  ports:
  - port: 22
    nodePort: 32222
  selector:
    app: my-openssh-server
root@node1:~# kubectl apply -f my-openssh-server-deployment.yaml
root@node1:~# iptables-save | grep 32222
root@node1:~# ###kubectl exec -ti my-openssh-server-NNNNNNNN-NNNNN -- bash
server:~# ssh -p 32222 nodeN
Welcome to OpenSSH Server
my-openssh-server-NNNNNNNN-NNNNN:~# nslookup my-openssh-server.default.svc.cluster.local
gitlab-runner@gate:~/webd$ cat my-webd-ssh-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-webd-ssh
  namespace: my-ns
spec:
  selector:
    matchLabels:
      app: my-webd-ssh
  replicas: 1
  template:
    metadata:
      labels:
        app: my-webd-ssh
    spec:
      containers:
      - name: my-webd
        image: server.corpX.un:5000/student/webd:latest
        volumeMounts:
        - name: html
          mountPath: /var/www
      - name: my-ssh
        image: atmoz/sftp
        args: ["user3:password3:10003"]
        volumeMounts:
        - name: html
          mountPath: /home/user3/www
      volumes:
      - name: html
        emptyDir: {}
...
$ kubectl describe pod my-webd-NNNNNNNNNN-NNNNN -n my-ns
$ kubectl exec -ti -n my-ns my-webd-NNNNNNNNNN-NNNNN -c my-ssh -- bash
$ ### kubectl expose deployment my-webd-ssh --type=NodePort --port=80,22 -n my-ns
$ cat my-webd-ssh-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-webd-ssh
  namespace: my-ns
spec:
  type: NodePort
  selector:
    app: my-webd-ssh
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22
# wget https://get.helm.sh/helm-v3.9.0-linux-amd64.tar.gz
# tar -zxvf helm-v3.9.0-linux-amd64.tar.gz
# mv linux-amd64/helm /usr/local/bin/helm
$ helm upgrade ingress-nginx --install ingress-nginx \
  --set controller.hostNetwork=true,controller.publishService.enabled=false,controller.kind=DaemonSet,controller.config.use-forwarded-headers=true \
  --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace
$ helm list --namespace ingress-nginx
$ helm list -A
$ kubectl get all -n ingress-nginx -o wide
$ helm delete ingress-nginx --namespace ingress-nginx
$ mkdir ingress-nginx; cd ingress-nginx
$ helm template ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx | tee t1.yaml
$ helm show values ingress-nginx --repo https://kubernetes.github.io/ingress-nginx | tee values.yaml.orig
$ cat values.yaml
controller:
  hostNetwork: true
  publishService:
    enabled: false
  kind: DaemonSet
#  config:
#    use-forwarded-headers: true
#    allow-snippet-annotations: true
$ helm template ingress-nginx -f values.yaml --repo https://kubernetes.github.io/ingress-nginx -n ingress-nginx | tee t2.yaml
$ helm upgrade ingress-nginx -i ingress-nginx -f values.yaml --repo https://kubernetes.github.io/ingress-nginx -n ingress-nginx --create-namespace
$ kubectl exec -n ingress-nginx pods/ingress-nginx-controller-<TAB> -- cat /etc/nginx/nginx.conf | tee nginx.conf | grep use_forwarded_headers
$ kubectl -n ingress-nginx describe service/ingress-nginx-controller
...
Endpoints: 192.168.X.221:80,192.168.X.222:80,192.168.X.223:80
...
# kubectl get clusterrole -A | grep -i ingress
# kubectl get clusterrolebindings -A | grep -i ingress
# kubectl get validatingwebhookconfigurations -A | grep -i ingress
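To confirm which overrides are actually active in the release, helm can print the user-supplied (or, with --all, the computed) values:

$ helm get values ingress-nginx -n ingress-nginx
$ helm get values ingress-nginx -n ingress-nginx --all | less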
gitlab-runner@server:~/gowebd-k8s$ helm create webd-chart
$ less webd-chart/templates/deployment.yaml
$ cat webd-chart/Chart.yaml
...
description: A Helm chart WebD for Kubernetes
...
version: 0.1.1
...
appVersion: "latest"
#appVersion: ver1.7 #for vanilla argocd
$ cat webd-chart/values.yaml
...
replicaCount: 2

image:
  repository: server.corpX.un:5000/student/webd
  pullPolicy: Always
...
serviceAccount:
  create: false
...
service:
#  type: NodePort
...
ingress:
  enabled: true
  className: "nginx"
...
  hosts:
    - host: webd.corpX.un
...
#  tls: []
#  tls:
#    - secretName: gowebd-tls
#      hosts:
#        - gowebd.corpX.un
...
#APWEBD_HOSTNAME: "apwebd.corp13.un"
#KEYCLOAK_HOSTNAME: "keycloak.corp13.un"
#REALM_NAME: "corp13"
$ less webd-chart/templates/deployment.yaml
...
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
# env:
# - name: APWEBD_HOSTNAME
# value: "{{ .Values.APWEBD_HOSTNAME }}"
# - name: KEYCLOAK_HOSTNAME
# value: "{{ .Values.KEYCLOAK_HOSTNAME }}"
# - name: REALM_NAME
# value: "{{ .Values.REALM_NAME }}"
...
# livenessProbe:
# httpGet:
# path: /
# port: http
# readinessProbe:
# httpGet:
# path: /
# port: http
...
$ helm template my-webd webd-chart/ | less
$ helm install my-webd webd-chart/ -n my-ns --create-namespace --wait
$ kubectl describe events -n my-ns | less
$ export HELM_NAMESPACE=my-ns
$ helm list
$ ### helm upgrade my-webd webd-chart/ --set=image.tag=ver1.10
$ helm history my-webd
$ helm rollback my-webd 1
$ helm uninstall my-webd
gitlab-runner@server:~/gowebd-k8s$ helm repo add --username student --password NNNNN-NNNNNNNNNNNNNNNNNNN webd http://server.corpX.un/api/v4/projects/N/packages/helm/stable
"webd" has been added to your repositories
gitlab-runner@server:~/gowebd-k8s$ ### helm repo remove webd
gitlab-runner@server:~/gowebd-k8s$ helm repo list
gitlab-runner@server:~/gowebd-k8s$ helm package webd-chart
gitlab-runner@server:~/gowebd-k8s$ tar -tf webd-chart-0.1.1.tgz
gitlab-runner@server:~/gowebd-k8s$ helm plugin install https://github.com/chartmuseum/helm-push
gitlab-runner@server:~/gowebd-k8s$ helm cm-push webd-chart-0.1.1.tgz webd
gitlab-runner@server:~/gowebd-k8s$ rm webd-chart-0.1.1.tgz
kube1:~# helm repo add webd http://server.corpX.un/api/v4/projects/N/packages/helm/stable
kube1:~# helm repo update
kube1:~# helm search repo webd
kube1:~# helm repo update webd
kube1:~# helm install my-webd webd/webd-chart
kube1:~# ###helm uninstall my-webd
kube1:~# mkdir gowebd; cd gowebd
kube1:~/gowebd# ###helm pull webd-chart --repo https://server.corp13.un/api/v4/projects/1/packages/helm/stable
kube1:~/gowebd# helm show values webd-chart --repo https://server.corp13.un/api/v4/projects/1/packages/helm/stable | tee values.yaml.orig
kube1:~/gowebd# cat values.yaml
replicaCount: 3
image:
  tag: "ver1.1"
#REALM_NAME: "corp"
kube1:~/gowebd# helm upgrade my-webd -i webd-chart -f values.yaml -n my-ns --create-namespace --repo https://server.corp13.un/api/v4/projects/1/packages/helm/stable
$ curl http://kubeN -H "Host: gowebd.corpX.un"
kube1:~/gowebd# ###helm uninstall my-webd -n my-ns
helm repo add gitlab https://charts.gitlab.io
helm search repo -l gitlab/gitlab-runner
helm show values gitlab/gitlab-runner | tee values.yaml
gitlab-runner@server:~$ diff values.yaml values.yaml.orig
...
gitlabUrl: http://server.corpX.un/
...
runnerRegistrationToken: "NNNNNNNNNNNNNNNNNNNNNNNN"
...
148,149c142
< create: true
---
> create: false
325d317
< privileged = true
432c424
< allowPrivilegeEscalation: true
---
> allowPrivilegeEscalation: false
435c427
< privileged: true
---
> privileged: false
gitlab-runner@server:~$ helm upgrade -i gitlab-runner gitlab/gitlab-runner -f values.yaml -n gitlab-runner --create-namespace
gitlab-runner@server:~$ kubectl get all -n gitlab-runner
$ helm search hub -o json wordpress | jq '.' | less
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm show values bitnami/wordpress
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
$ cat dashboard-user-role.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
$ kubectl apply -f dashboard-user-role.yaml
$ kubectl -n kubernetes-dashboard create token admin-user
$ kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d ; echo
cmder$ kubectl proxy
student@node2:~$ sudo apt install conntrack

https://computingforgeeks.com/install-mirantis-cri-dockerd-as-docker-engine-shim-for-kubernetes/
...
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.2/crictl-v1.24.2-linux-amd64.tar.gz
...
student@node2:~$ minikube start --driver=none --insecure-registry "server.corpX.un:5000"
student@node1:~$ minikube dashboard &
...
Opening http://127.0.0.1:NNNNN/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser
...
/home/mobaxterm> ssh -L NNNNN:localhost:NNNNN student@192.168.X.10

Now the same link works on the Windows host system.
student@node1:~$ tar -cvzf kube-config.tar.gz .kube/config .minikube/ca.crt .minikube/profiles/minikube
gitlab-runner@server:~$ scp student@node1:kube-config.tar.gz .
gitlab-runner@server:~$ tar -xvf kube-config.tar.gz
gitlab-runner@server:~$ cat .kube/config
...
certificate-authority: /home/gitlab-runner/.minikube/ca.crt
...
client-certificate: /home/gitlab-runner/.minikube/profiles/minikube/client.crt
client-key: /home/gitlab-runner/.minikube/profiles/minikube/client.key
...
root@gate:~# curl -L https://github.com/kubernetes/kompose/releases/download/v1.26.0/kompose-linux-amd64 -o kompose
root@gate:~# chmod +x kompose
root@gate:~# sudo mv ./kompose /usr/local/bin/kompose
gitlab-runner@gate:~/webd$ kompose convert
gitlab-runner@gate:~/webd$ ls *yaml
gitlab-runner@gate:~/webd$ kubectl apply -f sftp-deployment.yaml,vol1-persistentvolumeclaim.yaml,webd-service.yaml,sftp-service.yaml,webd-deployment.yaml
gitlab-runner@gate:~/webd$ kubectl get all