This is an old revision of the document!
kube1:~/users# kubectl delete serviceaccounts admin-user
kube1:~/users# kubectl -n my-ns describe pod my-debian | grep -i account
kube1:~/users# kubectl -n my-ns exec -ti pods/my-debian -- bash
root@my-debian:/# apt update && apt install kubectl
kube1:~/users# kubectl auth can-i get pods --as=system:serviceaccount:my-ns:default
kube1:~/users# cat sa-default-cluster-admin.yaml
# ClusterRoleBinding: grants the cluster-admin ClusterRole to the
# "default" ServiceAccount of namespace my-ns (deliberately over-broad,
# for the demo; deleted again at the end of the exercise).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: sa-default-cluster-admin
subjects:
  - kind: ServiceAccount
    name: default
    namespace: my-ns
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
kube1:~/users# kubectl apply -f sa-default-cluster-admin.yaml
Проверяем доступ к API из приложения
kube1:~/users# kubectl delete -f sa-default-cluster-admin.yaml
kube1:~/users# kubectl -n my-ns delete pod my-debian
user1@client1:~$ rm -v user1*
user1@client1:~$ rm -rfv .kube/
kube1:~# rm -rfv users/
user1@client1:~$ cp -rv .kube.oidc/ .kube/
user1@client1:~$ kubelogin
user1@client1:~$ kubectl auth whoami
https://medium.com/@amirhosseineidy/kubernetes-authentication-with-keycloak-oidc-63571eaeed61
https://vlasov.pro/ru/p/kubernetes-oidc/
https://github.com/int128/kubelogin
https://timeweb.cloud/docs/k8s/connect-oidc-provider-to-cluster
? https://www.talkingquickly.co.uk/setting-up-oidc-login-kubernetes-kubectl-with-keycloak
Email verified
kube1:~/users# vim /etc/kubernetes/manifests/kube-apiserver.yaml
...
# Excerpt of the static-pod manifest: OIDC flags added to the
# kube-apiserver command line. kubelet restarts the pod automatically
# when this file changes.
spec:
  containers:
  - command:
    - kube-apiserver
    # Keycloak realm; must serve /.well-known/openid-configuration over HTTPS
    - --oidc-issuer-url=https://keycloak.corp13.un/realms/corp13
    # Must match the "aud" claim of the presented token; with the Keycloak
    # "account" audience the apiserver rejects tokens (see the E1203 log below).
    #- --oidc-client-id=account
    - --oidc-client-id=any-client
    # Token claim used as the Kubernetes username
    - --oidc-username-claim=email
    #- --oidc-username-claim=preferred_username
    # Token claim carrying group membership for RBAC
    - --oidc-groups-claim=groups
...
kube1:~/users# kubectl -n kube-system logs Pod/kube-apiserver-kube1
...
E1203 05:22:46.412571 1 authentication.go:73] "Unable to authenticate the request" err="[invalid bearer token, oidc: verify token: oidc: expected audience \"any-client\" got [\"account\"]]"
...
user1@client1:~$ cat .kube/config
# Client kubeconfig (~/.kube/config) using the legacy "oidc" auth-provider.
# NOTE(review): the built-in oidc auth-provider was removed in kubectl 1.26;
# newer clients need an exec plugin (kubelogin) instead — confirm client version.
apiVersion: v1
clusters:
  - cluster:
      certificate-authority-data: ...
      server: https://192.168.13.221:6443
    name: cluster.local
contexts:
  - context:
      cluster: cluster.local
      user: user1
    name: default-context
current-context: default-context
kind: Config
preferences: {}
users:
  - name: user1
    user:
      auth-provider:
        config:
          # Must match --oidc-client-id on the apiserver
          client-id: any-client
          client-secret: anystring
          grant-type: password
          # id-token:      # filled in after the first login
          idp-issuer-url: https://keycloak.corp13.un/realms/corp13
          # refresh-token: # filled in after the first login
        name: oidc
https://habr.com/ru/companies/slurm/articles/711868/|Журналы аудита Kubernetes: лучшие практики и настройка
kube1:~# cat /etc/kubernetes/audit-policy.yaml
# Audit policy for kube-apiserver (--audit-policy-file).
# Rules are evaluated in order; the FIRST matching rule decides the level,
# so the None-rules must stay above the catch-all.
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  # Skip read-only requests
  - level: None
    verbs: ["get", "watch", "list"]
  # Skip the noisy core-group events resource
  - level: None
    resources:
      - group: ""  # core API group
        resources: ["events"]
  # Skip control-plane system users
  - level: None
    users:
      - "system:kube-scheduler"
      - "system:kube-proxy"
      - "system:apiserver"
      - "system:kube-controller-manager"
      - "system:serviceaccount:gatekeeper-system:gatekeeper-admin"
  # Skip kubelet traffic
  - level: None
    userGroups: ["system:nodes"]
  # Everything else: record request and response bodies
  - level: RequestResponse
kube1:~# kubectl apply -f /etc/kubernetes/audit-policy.yaml --dry-run=client
error: resource mapping not found for name...
https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/|Kubernetes Documentation - Auditing
kube1:~# vim /etc/kubernetes/manifests/kube-apiserver.yaml
...
- --audit-log-maxage=1
...
kube1:~# date
kube1:~# journalctl -u kubelet --since "2025-12-01 10:19:00" | cat | less
...
..."SyncLoop REMOVE" source="file" pods=["kube-system/kube-apiserver-kube1"]...
...
..."Killing container with a grace period" pod="kube-system/kube-apiserver-kube1"...
kube1:~# tail -f /var/log/kubernetes/audit/audit.log | jq
user1@client1:~$ kubectl -n my-apwebd-ns delete pod my-webd-<TAB>