user1@client1:~$ cat user1.req | base64 -w0
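The user1.req piped through base64 here is an ordinary X.509 CSR; a sketch of how it can be produced (an assumption: the CN becomes the Kubernetes user name and the O field the group, cko matching the Group subject used in the RBAC examples below):

user1@client1:~$ openssl genrsa -out user1.key 2048
user1@client1:~$ openssl req -new -key user1.key -out user1.req -subj "/CN=user1/O=cko"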
kube1:~/users# kubectl explain csr.spec.usages
kube1:~/users# cat user1.req.yaml
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: user1
spec:
  request: LS0t...S0tCg==
  signerName: kubernetes.io/kube-apiserver-client
  expirationSeconds: 8640000   # 100 * one day
  usages:
#  - digital signature
#  - key encipherment
  - client auth
kube1:~/users# kubectl apply -f user1.req.yaml
kube1:~/users# kubectl describe csr/user1
kube1:~/users# kubectl certificate approve user1
kube1:~/users# kubectl get csr
kube1:~/users# kubectl get csr/user1 -o yaml
kube1:~/users# kubectl get csr/user1 -o jsonpath="{.status.certificate}" | base64 -d | tee user1.crt
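A quick sanity check of the issued certificate (subject and validity should reflect the CSR's CN/O and expirationSeconds):

kube1:~/users# openssl x509 -in user1.crt -noout -subject -issuer -dates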
user1@client1:~$ scp root@kube1:users/user1.crt .
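With the certificate and the private key on client1, a kubeconfig for user1 can be assembled; a sketch assuming the key file user1.key from the CSR step and the kubeadm CA at /etc/kubernetes/pki/ca.crt on kube1 (the server address and names match the config shown further below):

user1@client1:~$ scp root@kube1:/etc/kubernetes/pki/ca.crt .
user1@client1:~$ kubectl config set-cluster cluster.local --server=https://192.168.13.221:6443 --certificate-authority=ca.crt --embed-certs=true
user1@client1:~$ kubectl config set-credentials user1 --client-certificate=user1.crt --client-key=user1.key --embed-certs=true
user1@client1:~$ kubectl config set-context default-context --cluster=cluster.local --user=user1
user1@client1:~$ kubectl config use-context default-context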
kube1:~/users# kubectl delete csr user1
kube1:~# kubectl api-resources -o wide | less

APIVERSION = <group> + "/" + <version of the API>

kube1:~/users# cat lh-svc-proxy-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: longhorn-system
  name: lh-svc-proxy-role
rules:
- apiGroups: [""]
  resources: ["services/proxy"]
  verbs: ["get"]
kube1:~/users# cat user1-lh-svc-proxy-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: user1-lh-svc-proxy-rolebinding
  namespace: longhorn-system
subjects:
- kind: User
  name: user1
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: lh-svc-proxy-role
  apiGroup: rbac.authorization.k8s.io
kube1:~/users# kubectl apply -f lh-svc-proxy-role.yaml,user1-lh-svc-proxy-rolebinding.yaml

student@client1:~$ kubectl proxy

student@client1:~$ curl http://localhost:8001/api/v1/namespaces/longhorn-system/services/longhorn-frontend:80/proxy/
student@client1:~$ curl http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/

kube1:~/users# kubectl delete -f lh-svc-proxy-role.yaml,user1-lh-svc-proxy-rolebinding.yaml
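Impersonation gives a quick check of the effect of the services/proxy rule; run it before and after the delete above to see the answer flip (the second command should stay "no" either way):

kube1:~/users# kubectl auth can-i get services --subresource=proxy -n longhorn-system --as=user1
kube1:~/users# kubectl auth can-i get pods -n longhorn-system --as=user1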
kube1:~/users# cat ns-full-access.yaml
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: ns-full-access
  namespace: my-ns
rules:
- apiGroups: ["*"]
  resources: ["*"]
  verbs: ["*"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: ns-full-access-rolebinding
  namespace: my-ns
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: cko
#  kind: User
#  name: user1
roleRef:
  kind: Role
  name: ns-full-access
  apiGroup: rbac.authorization.k8s.io
#roleRef:
#  apiGroup: rbac.authorization.k8s.io
#  kind: ClusterRole
#  name: admin
kube1:~/users# kubectl apply -f ns-full-access.yaml
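The grant can be inspected with impersonation; the group cko matches the RoleBinding subject above (for certificate users, groups come from the certificate's O field):

kube1:~/users# kubectl auth can-i --list -n my-ns --as=user1 --as-group=cko
kube1:~/users# kubectl auth can-i create deployments -n my-ns --as=user1 --as-group=cko
kube1:~/users# kubectl auth can-i create deployments -n default --as=user1 --as-group=cko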
kube1:~/users# kubectl get rolebindings --all-namespaces -o=json | jq '.items[] | select(.subjects[]?.name == "user1")'
kube1:~/users# kubectl get rolebindings --all-namespaces -o=json | jq '.items[] | select(.subjects[]?.name == "cko")'

kube1:~/users# kubectl delete -f ns-full-access.yaml
OR
kube1:~/users# kubectl -n my-ns delete rolebindings ns-full-access-rolebinding
kube1:~/users# kubectl -n my-ns delete role ns-full-access
kube1:~/users# cat svc-pfw-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
#kind: Role
metadata:
  name: svc-pfw-role
#  namespace: my-pgcluster-ns
rules:
- apiGroups: [""]
  resources: ["services"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list"]
- apiGroups: [""]
  resources: ["pods/portforward"]
  verbs: ["create"]
kube1:~/users# cat user1-svc-pfw-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
#kind: RoleBinding
metadata:
  name: user1-svc-pfw-rolebinding
#  namespace: my-pgcluster-ns
subjects:
- kind: User
  name: user1
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
#  kind: Role
  name: svc-pfw-role
  apiGroup: rbac.authorization.k8s.io
kube1:~/users# kubectl apply -f svc-pfw-role.yaml,user1-svc-pfw-rolebinding.yaml

student@client1:~$ kubectl port-forward -n my-pgcluster-ns services/my-pgcluster-rw 5432:5432
student@client1:~$ psql postgres://keycloak:strongpassword@127.0.0.1:5432/keycloak
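Each rule of the ClusterRole is exercised by kubectl port-forward to a service (it resolves the service, lists its pods, then opens the portforward subresource); a quick self-check from the client:

student@client1:~$ kubectl auth can-i get services -n my-pgcluster-ns
student@client1:~$ kubectl auth can-i list pods -n my-pgcluster-ns
student@client1:~$ kubectl auth can-i create pods/portforward -n my-pgcluster-ns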
kube1:~/users# kubectl delete -f svc-pfw-role.yaml,user1-svc-pfw-rolebinding.yaml
kube1:~/users# kubectl get clusterroles | less
kube1:~/users# kubectl get clusterrole cluster-admin -o yaml
kube1:~/users# kubectl get clusterrolebindings | less
kube1:~/users# kubectl get clusterrolebindings cluster-admin -o yaml
kube1:~/users# cat user1-cluster-admin.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: user1-cluster-admin
subjects:
- kind: User
  name: user1
#  name: user1@corp13.un
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
kube1:~/users# kubectl apply -f user1-cluster-admin.yaml

student@client1:~$ kubectl get nodes
kube1:~/users# kubectl get clusterrolebindings -o=json | jq '.items[] | select(.subjects[]?.name == "kubeadm:cluster-admins")'
kube1:~/users# kubectl get clusterrolebindings -o=json | jq '.items[] | select(.subjects[]?.name == "user1")'
kube1:~/users# kubectl get clusterrolebindings -o=json | jq '.items[] | select(.subjects[]?.name == "default")'

kube1:~/users# kubectl delete -f user1-cluster-admin.yaml
OR
kube1:~/users# kubectl delete clusterrolebindings user1-cluster-admin
kube1:~/users# kubectl delete serviceaccounts admin-user
kube1:~/users# kubectl -n my-ns describe pod my-debian | grep -i account
kube1:~/users# kubectl -n my-ns exec -ti pods/my-debian -- bash
root@my-debian:/# apt update && apt install kubectl
kube1:~/users# kubectl auth can-i get pods --as=system:serviceaccount:my-ns:default
kube1:~/users# cat sa-default-cluster-admin.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: sa-default-cluster-admin
subjects:
- kind: ServiceAccount
  name: default
  namespace: my-ns
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
kube1:~/users# kubectl apply -f sa-default-cluster-admin.yaml
Checking API access from inside the application
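A minimal in-pod check using the standard ServiceAccount mount (the pod list URL is just an example request):

root@my-debian:/# TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
root@my-debian:/# curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H "Authorization: Bearer $TOKEN" https://kubernetes.default.svc/api/v1/namespaces/my-ns/pods
root@my-debian:/# kubectl get pods -A     # kubectl falls back to the in-cluster ServiceAccount config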
kube1:~/users# kubectl delete -f sa-default-cluster-admin.yaml
kube1:~/users# kubectl -n my-ns delete pod my-debian
user1@client1:~$ rm -v user1*
user1@client1:~$ rm -rfv .kube/

kube1:~/users# kubectl delete -f ns-full-access.yaml
kube1:~# rm -rfv users/
kube1:~# ???rm -rfv dashboard/
kube1:~# kubectl delete serviceaccounts admin-user
kube1:~# kubectl delete pod my-debian -n my-ns
https://medium.com/@amirhosseineidy/kubernetes-authentication-with-keycloak-oidc-63571eaeed61
https://vlasov.pro/ru/p/kubernetes-oidc/
https://github.com/int128/kubelogin
https://timeweb.cloud/docs/k8s/connect-oidc-provider-to-cluster
? https://www.talkingquickly.co.uk/setting-up-oidc-login-kubernetes-kubectl-with-keycloak
In Keycloak the user's Email verified flag must be set: with --oidc-username-claim=email the API server only accepts tokens whose email_verified claim is true.
kube1:~/users# vim /etc/kubernetes/manifests/kube-apiserver.yaml
...
spec:
  containers:
  - command:
    - kube-apiserver
    - --oidc-issuer-url=https://keycloak.corp13.un/realms/corp13
#    - --oidc-client-id=account
    - --oidc-client-id=any-client
    - --oidc-username-claim=email
#    - --oidc-username-claim=preferred_username
    - --oidc-groups-claim=groups
...
kube1:~/users# kubectl -n kube-system logs Pod/kube-apiserver-kube1
...
E1203 05:22:46.412571 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, oidc: verify token: oidc: expected audience \"any-client\" got [\"account\"]]"
...
user1@client1:~$ cat .kube/config
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: ...
    server: https://192.168.13.221:6443
  name: cluster.local
contexts:
- context:
    cluster: cluster.local
    user: user1
  name: default-context
current-context: default-context
kind: Config
preferences: {}
users:
- name: user1
  user:
    auth-provider:
      config:
        client-id: any-client
        client-secret: anystring
        grant-type: password
#        id-token:
        idp-issuer-url: https://keycloak.corp13.un/realms/corp13
#        refresh-token:
      name: oidc
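The auth-provider block above is the legacy client-go OIDC plugin; with kubelogin (linked above, installable as kubectl krew install oidc-login) roughly the same user entry would be expressed as an exec credential plugin, a sketch:

users:
- name: user1
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      command: kubectl
      args:
      - oidc-login
      - get-token
      - --oidc-issuer-url=https://keycloak.corp13.un/realms/corp13
      - --oidc-client-id=any-client
      - --oidc-client-secret=anystring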
https://habr.com/ru/companies/slurm/articles/711868/|Kubernetes audit logs: best practices and configuration
kube1:~# cat /etc/kubernetes/audit-policy.yaml
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: None
  verbs: ["get", "watch", "list"]
- level: None
  resources:
  - group: ""   # core
    resources: ["events"]
- level: None
  users:
  - "system:kube-scheduler"
  - "system:kube-proxy"
  - "system:apiserver"
  - "system:kube-controller-manager"
  - "system:serviceaccount:gatekeeper-system:gatekeeper-admin"
- level: None
  userGroups: ["system:nodes"]
- level: RequestResponse
kube1:~# kubectl apply -f /etc/kubernetes/audit-policy.yaml --dry-run=client
error: resource mapping not found for name...

The audit Policy is not an object served by the API server, so kubectl cannot apply it; kube-apiserver reads it directly from the file referenced by --audit-policy-file.
https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/|Kubernetes Documentation - Auditing
kube1:~# vim /etc/kubernetes/manifests/kube-apiserver.yaml
...
    - --audit-log-maxage=1
...
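--audit-log-maxage is only one piece of the static-pod change; a sketch of the usual full set of flags and mounts, wiring in the policy file above and the log path read below (the backup/size limits here are arbitrary):

    - --audit-policy-file=/etc/kubernetes/audit-policy.yaml
    - --audit-log-path=/var/log/kubernetes/audit/audit.log
    - --audit-log-maxage=1
    - --audit-log-maxbackup=3
    - --audit-log-maxsize=100
...
    volumeMounts:
    - mountPath: /etc/kubernetes/audit-policy.yaml
      name: audit-policy
      readOnly: true
    - mountPath: /var/log/kubernetes/audit
      name: audit-log
...
  volumes:
  - hostPath:
      path: /etc/kubernetes/audit-policy.yaml
      type: File
    name: audit-policy
  - hostPath:
      path: /var/log/kubernetes/audit
      type: DirectoryOrCreate
    name: audit-log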
kube1:~# date
kube1:~# journalctl -u kubelet --since "2025-12-01 10:19:00" | cat | less
...
..."SyncLoop REMOVE" source="file" pods=["kube-system/kube-apiserver-kube1"]...
...
..."Killing container with a grace period" pod="kube-system/kube-apiserver-kube1"...
kube1:~# tail -f /var/log/kubernetes/audit/audit.log | jq
user1@client1:~$ kubectl -n my-apwebd-ns delete pod my-webd-<TAB>
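The delete above should now appear in the audit log; a jq filter over the standard event fields picks it out:

kube1:~# cat /var/log/kubernetes/audit/audit.log | jq 'select(.verb=="delete" and .objectRef.resource=="pods") | {user: .user.username, namespace: .objectRef.namespace, name: .objectRef.name, stage: .stage}'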