This is an old revision of the document!
user2@server:~$ cat user2.req | base64 -w0
kube1:~/users# kubectl explain csr.spec.usages
kube1:~/users# cat user2.req.yaml
# CSR object wrapping the base64-encoded PEM request from user2.req;
# signed by the built-in kube-apiserver client signer for client-cert auth.
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: user2
spec:
  # base64 -w0 output of the PEM CSR (placeholder shown)
  request: LS0t...S0tCg==
  signerName: kubernetes.io/kube-apiserver-client
  expirationSeconds: 8640000  # 100 * one day
  usages:
    # - digital signature
    # - key encipherment
    - client auth
kube1:~/users# kubectl apply -f user2.req.yaml
kube1:~/users# kubectl describe csr/user2
kube1:~/users# kubectl certificate approve user2
kube1:~/users# kubectl get csr
kube1:~/users# kubectl get csr/user2 -o yaml
kube1:~/users# kubectl get csr/user2 -o jsonpath="{.status.certificate}" | base64 -d | tee user2.crt
user2@server:~$ scp root@kube1:users/user2.crt .
user2@server:~$ kubectl config set-cluster cluster.local --insecure-skip-tls-verify=true --server=https://192.168.13.221:6443
user2@server:~$ cat .kube/config
user2@server:~$ kubectl config set-credentials user2 --client-certificate=user2.crt --client-key=user2.key --embed-certs=true
OR
student@client1:~$ kubectl config set-credentials user2 --token=...................................
user2@server:~$ kubectl config set-context default-context --cluster=cluster.local --user=user2
user2@server:~$ kubectl config use-context default-context
user2@server:~$ kubectl auth whoami
user2@server:~$ kubectl get pods
Error from server (Forbidden) or ...
kube1:~/users# cat lh-svc-proxy-role.yaml
apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: namespace: longhorn-system name: lh-svc-proxy-role rules: - apiGroups: [""] resources: ["services/proxy"] verbs: ["get"]
kube1:~/users# cat user2-lh-svc-proxy-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: user2-lh-svc-proxy-rolebinding namespace: longhorn-system subjects: - kind: User name: user2 apiGroup: rbac.authorization.k8s.io roleRef: kind: Role name: lh-svc-proxy-role apiGroup: rbac.authorization.k8s.io
kube1:~/users# kubectl apply -f lh-svc-proxy-role.yaml,user2-lh-svc-proxy-rolebinding.yaml
student@client1:~$ kubectl proxy
student@client1:~$ curl http://localhost:8001/api/v1/namespaces/longhorn-system/services/longhorn-frontend:80/proxy/
kube1:~/users# kubectl get rolebindings --all-namespaces -o=json | jq '.items[] | select(.subjects[]?.name == "user2")'
kube1:~/users# kubectl get clusterroles |less
kube1:~/users# kubectl get clusterrolebindings cluster-admin -o yaml
kube1:~/users# kubectl get clusterrole view -o yaml
kube1:~/users# cat user2-view-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: user2-view-clusterrolebinding subjects: - kind: User name: user2 apiGroup: rbac.authorization.k8s.io roleRef: kind: ClusterRole name: view apiGroup: rbac.authorization.k8s.io
kube1:~/users# kubectl apply -f user2-view-clusterrolebinding.yaml
student@client1:~$ kubectl get pods -A
student@client1:~$ kubectl port-forward -n my-pgcluster-ns services/my-pgcluster-rw 5432:5432
error: error upgrading connection: pods "my-pgcluster-3" is forbidden: User "user2" cannot create resource "pods/portforward" in API group "" in the namespace "my-pgcluster-ns"
kube1:~/users# cat svc-pfw-role.yaml
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole #kind: Role metadata: name: svc-pfw-role # namespace: my-pgcluster-ns rules: - apiGroups: [""] resources: ["services"] verbs: ["get"] - apiGroups: [""] resources: ["pods"] verbs: ["get", "list"] - apiGroups: [""] resources: ["pods/portforward"] verbs: ["create"]
kube1:~/users# cat user2-svc-pfw-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding #kind: RoleBinding metadata: name: user2-svc-pfw-rolebinding # namespace: my-pgcluster-ns subjects: - kind: User name: user2 apiGroup: rbac.authorization.k8s.io roleRef: kind: ClusterRole # kind: Role name: svc-pfw-role apiGroup: rbac.authorization.k8s.io kube1:~/users# kubectl apply -f svc-pfw-role.yaml,user2-svc-pfw-rolebinding.yaml student@client1:~$ kubectl port-forward -n my-pgcluster-ns services/my-pgcluster-rw 5432:5432 student@client1:~$ psql postgres://keycloak:strongpassword@127.0.0.1:5432/postgres kube1:~/users# kubectl get clusterrolebindings -o=json | jq '.items[] | select(.subjects[]?.name == "user2")'