Сервис PostgreSQL
Установка PostgreSQL
Linux Debian/Ubuntu
server# apt install postgresql postgresql-contrib
Docker
Kubernetes
Helm
kube1:~/my-postgres# helm show values oci://registry-1.docker.io/bitnamicharts/postgresql --version 16.6.2 | tee values.yaml.orig
kube1:~/my-postgres# cat values.yaml
auth:
  postgresPassword: "strongpassword"
kube1:~/my-postgres# helm upgrade my-postgres -i -f values.yaml oci://registry-1.docker.io/bitnamicharts/postgresql -n my-postgres-ns --create-namespace --version 16.6.2
kube1:~# kubectl -n my-postgres-ns exec -ti pod/my-postgres-postgresql-0 -- /opt/bitnami/scripts/postgresql/entrypoint.sh psql
kube1:~# kubectl -n my-postgres-ns exec -ti pod/my-postgres-postgresql-0 -- env PGPASSWORD="strongpassword" psql -U postgres
kube1:~# ###psql postgres://postgres:strongpassword@my-postgres-postgresql.my-postgres-ns:5432/postgres
kube1:~# kubectl port-forward -n my-postgres-ns --address 0.0.0.0 services/my-postgres-postgresql 5432:5432
server# docker run -it --rm postgres:17 psql postgres://postgres:strongpassword@kube1:5432/postgres
CloudNativePG
kube1:~# kubectl apply --server-side -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.25/releases/cnpg-1.25.1.yaml
kube1:~# kubectl -n cnpg-system get all
kube1:~/my-pgcluster# echo -n keycloak | base64
kube1:~/my-pgcluster# echo -n strongpassword | base64
kube1:~/my-pgcluster# cat keycloak-db-secret.yaml
apiVersion: v1
data:
  username: a2V5Y2xvYWs=
  password: c3Ryb25ncGFzc3dvcmQ=
kind: Secret
metadata:
  name: keycloak-db-secret
type: kubernetes.io/basic-auth
kube1:~/my-pgcluster# cat my-pgcluster.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: my-pgcluster
spec:
  instances: 3
  bootstrap:
    initdb:
      database: keycloak
      owner: keycloak
      secret:
        name: keycloak-db-secret
  storage:
    size: 10Gi
kube1:~/my-pgcluster# kubectl create ns my-pgcluster-ns
kube1:~/my-pgcluster# kubectl -n my-pgcluster-ns apply -f keycloak-db-secret.yaml,my-pgcluster.yaml
kube1# watch kubectl -n my-pgcluster-ns get all
kube1# ###kubectl -n my-pgcluster-ns exec -it pod/my-pgcluster-1 -- psql
kube1# ###psql postgres://keycloak:strongpassword@my-pgcluster-rw.my-pgcluster-ns:5432/keycloak
Плагин cnpg
kube1:~# wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.25.1/kubectl-cnpg_1.25.1_linux_x86_64.deb -O kube-plugin.deb
kube1:~# dpkg -i kube-plugin.deb
kube1:~# kubectl cnpg status my-pgcluster -n my-pgcluster-ns
kube1:~# kubectl cnpg logs cluster my-pgcluster -n my-pgcluster-ns
Клиент psql
# ###apt install postgresql-client
# sudo -u postgres psql
postgres=# ALTER USER postgres WITH PASSWORD 'strongpassword';
postgres=# CREATE DATABASE keycloak;
postgres=# \l
postgres=# CREATE USER keycloak WITH PASSWORD 'strongpassword';
postgres=# \dg
postgres=# ALTER DATABASE keycloak OWNER TO keycloak;
postgres=# \c keycloak
keycloak=# \dt
keycloak=# \d user_entity
keycloak=# SELECT id, username FROM user_entity;
keycloak=# SELECT * FROM user_entity WHERE username='admin';
Клиент pgadmin
server# mkdir -p /private/var/lib/pgadmin; chmod 777 /private/var/lib/pgadmin
server# docker run -p 8081:80 -e 'PGADMIN_DEFAULT_EMAIL=postgres@corp13.un' -e 'PGADMIN_DEFAULT_PASSWORD=strongpassword' -v /private/var/lib/pgadmin:/var/lib/pgadmin --name pgadmin -d dpage/pgadmin4
server# docker logs pgadmin -f
...
... Listening at: http://[::]:80
...
Подключение по сети
server# cat /etc/postgresql/15/main/postgresql.conf
...
listen_addresses = '*'
...
# cat /etc/postgresql/15/main/pg_hba.conf
...
host all all 0.0.0.0/0 scram-sha-256
server# systemctl restart postgresql
client$ psql postgres://postgres:strongpassword@IP_OR_DNS_NAME:5432/postgres #-c "\dt"
PgBouncer
CloudNativePG PgBouncer
kube1:~/my-pgcluster# cat my-pgpooler.yaml
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: my-pgpooler
spec:
  cluster:
    name: my-pgcluster
  instances: 3
  type: rw
  pgbouncer:
    poolMode: session
    parameters:
      ignore_startup_parameters: search_path,extra_float_digits,options
      max_client_conn: "1000"
      default_pool_size: "10"
kube1:~/my-pgcluster# kubectl -n my-pgcluster-ns apply -f my-pgpooler.yaml
kube1:~/my-pgcluster# kubectl -n my-pgcluster-ns get all
Потоковая репликация
Резервное копирование и восстановление
pg_dump
root@d74a54cb5501:/# ###pg_dump -F c -d keycloak -U keycloak > /var/lib/postgresql/data/keycloak.dump
gate# sudo -u postgres pg_dump -F c -d keycloak > /tmp/keycloak.dump
gate# sudo -u postgres pg_dump -d keycloak > /tmp/keycloak.sql
gate# ###scp /tmp/keycloak.* kube1:
pg_restore
kube1# kubectl -n my-postgres-ns cp ~/keycloak.dump my-postgres-postgresql-0:/bitnami/postgresql/
kube1# kubectl -n my-postgres-ns exec -ti pod/my-postgres-postgresql-0 -- /opt/bitnami/scripts/postgresql/entrypoint.sh bash
postgres@my-postgres-postgresql-0:/$ psql -c "CREATE USER keycloak WITH PASSWORD 'strongpassword';"
postgres@my-postgres-postgresql-0:/$ pg_restore -C -d postgres /bitnami/postgresql/keycloak.dump
или
kube1:~# kubectl -n my-postgres-ns exec -ti pod/my-postgres-postgresql-0 -- env PGPASSWORD="strongpassword" psql -U postgres -c "CREATE USER keycloak WITH PASSWORD 'strongpassword';"
kube1:~# ssh gate.corp13.un cat /tmp/keycloak.dump | kubectl -n my-postgres-ns exec -i pod/my-postgres-postgresql-0 -- env PGPASSWORD="strongpassword" pg_restore -C -d postgres -U postgres
pg_dumpall
kube1:~# kubectl -n my-postgres-ns exec -i pod/my-postgres-postgresql-0 -- env PGPASSWORD="strongpassword" pg_dumpall -U postgres | tee ~/all.sql
kube1:~# kubectl delete ns my-postgres-ns
Снова разворачиваем через
Helm
kube1:~# cat ~/all.sql | kubectl -n my-postgres-ns exec -i pod/my-postgres-postgresql-0 -- env PGPASSWORD="strongpassword" psql -U postgres -f -
kube1:~# cp all.sql keycloak.sql
kube1:~# cat keycloak.sql
\connect keycloak
...
-- PostgreSQL database dump complete
kube1:~# cat ~/keycloak.sql | kubectl -n my-pgcluster-ns exec -i pod/my-pgcluster-1 -- psql -f -
Barman
CloudNativePG Backup
kube1:~/my-pgcluster# kubectl -n my-pgcluster-ns create secret generic minio-secret \
--from-literal=ACCESS_KEY_ID=root \
--from-literal=ACCESS_SECRET_KEY=strongpassword
kube1:~/my-pgcluster# cat my-pgcluster.yaml
...
  backup:
    barmanObjectStore:
      destinationPath: s3://postgresql/
      endpointURL: http://minio.corp13.un:9000
      s3Credentials:
        accessKeyId:
          name: minio-secret
          key: ACCESS_KEY_ID
        secretAccessKey:
          name: minio-secret
          key: ACCESS_SECRET_KEY
      wal:
        compression: gzip
    retentionPolicy: "3d"
kube1:~/my-pgcluster# kubectl -n my-pgcluster-ns apply -f my-pgcluster.yaml
kube1:~/my-pgcluster# kubectl cnpg status my-pgcluster -n my-pgcluster-ns
kube1:~/my-pgcluster# cat my-scheduled-backup.yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: my-scheduled-backup
spec:
  schedule: "0 0 0 * * *"
  immediate: true
  backupOwnerReference: self
  cluster:
    name: my-pgcluster
kube1:~/my-pgcluster# kubectl -n my-pgcluster-ns apply -f my-scheduled-backup.yaml
kube1# kubectl -n my-pgcluster-ns describe backups.postgresql.cnpg.io my-scheduled-backup
kube1# kubectl -n my-pgcluster-ns get backups.postgresql.cnpg.io
CloudNativePG Recovery
kube1:~# kubectl create ns my-pgcluster-ns
kube1:~# kubectl -n my-pgcluster-ns create secret generic minio-secret \
--from-literal=ACCESS_KEY_ID=root \
--from-literal=ACCESS_SECRET_KEY=strongpassword
kube1:~/my-pgcluster# cat my-pgcluster.yaml
...
  bootstrap:
    recovery:
      source: my-pgcluster
  externalClusters:
    - name: my-pgcluster
      barmanObjectStore:
        destinationPath: s3://postgresql/
        endpointURL: http://minio.corp13.un:9000
        s3Credentials:
          accessKeyId:
            name: minio-secret
            key: ACCESS_KEY_ID
          secretAccessKey:
            name: minio-secret
            key: ACCESS_SECRET_KEY
        wal:
          compression: gzip
  # backup:
  #   ...
Мониторинг PostgreSQL
CloudNativePG Monitoring