From 58e463ef9ceca752c7aef25da5279bed89e5cadd Mon Sep 17 00:00:00 2001 From: sergey grinko Date: Wed, 14 Feb 2024 16:06:13 +0300 Subject: [PATCH] =?UTF-8?q?=D0=94=D0=BE=D0=B1=D0=B0=D0=B2=D0=BB=D0=B5?= =?UTF-8?q?=D0=BD=D1=8B=20=D0=BD=D0=BE=D0=B2=D1=8B=D0=B5=20=D0=B2=D0=B5?= =?UTF-8?q?=D1=80=D1=81=D0=B8=D0=B8=20=D0=BA=D0=BE=D0=BD=D1=82=D0=B5=D0=B9?= =?UTF-8?q?=D0=BD=D0=B5=D1=80=D0=BE=D0=B2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 12/backup-service.yml | 2 +- 12/bin/clear_all_docker.sh | 1 + 12/bin/docker_start.sh | 2 +- 12/bin/harbor_push.sh | 2 +- 12/bin/hub_push.sh | 2 +- 12/docker-mamonsu/bootstrap_post.sql | 2 +- 12/docker-mamonsu/metrics.ru.md | 164 ++-- 12/docker-pgprobackup/backup.sh | 35 +- 12/docker-postgres/Dockerfile | 8 +- 12/docker-postgres/backup.sh | 34 +- 12/docker-postgres/sql/user_lookup.sql | 18 +- 12/docker_start.sh | 2 +- 12/postgres-service.yml | 4 +- 12/postgres-service_all.yml | 6 +- 12/postgres-service_pgb.yml | 6 +- 13/backup-service.yml | 2 +- 13/bin/clear_all_docker.sh | 1 + 13/bin/docker_start.sh | 2 +- 13/bin/harbor_push.sh | 2 +- 13/bin/hub_push.sh | 2 +- 13/docker-mamonsu/bootstrap_post.sql | 2 +- 13/docker-mamonsu/metrics.ru.md | 164 ++-- 13/docker-pgprobackup/backup.sh | 35 +- 13/docker-postgres/Dockerfile | 8 +- 13/docker-postgres/backup.sh | 34 +- 13/docker-postgres/sql/user_lookup.sql | 18 +- 13/docker_start.sh | 2 +- 13/postgres-service.yml | 4 +- 13/postgres-service_all.yml | 6 +- 13/postgres-service_pgb.yml | 4 +- 14/backup-service.yml | 2 +- 14/bin/clear_all_docker.sh | 1 + 14/bin/docker_start.sh | 2 +- 14/bin/harbor_push.sh | 2 +- 14/bin/hub_push.sh | 2 +- 14/docker-mamonsu/bootstrap_post.sql | 2 +- 14/docker-mamonsu/metrics.ru.md | 164 ++-- 14/docker-pgprobackup/backup.sh | 35 +- 14/docker-postgres/Dockerfile | 8 +- 14/docker-postgres/backup.sh | 34 +- 14/docker-postgres/sql/user_lookup.sql | 18 +- 14/docker_start.sh | 2 +- 14/postgres-service.yml | 4 +- 
14/postgres-service_all.yml | 6 +- 14/postgres-service_pgb.yml | 6 +- 15/backup-service.yml | 2 +- 15/bin/clear_all_docker.sh | 1 + 15/bin/docker_start.sh | 2 +- 15/bin/harbor_push.sh | 2 +- 15/bin/hub_push.sh | 2 +- 15/bin/upgrade_start.sh | 14 +- 15/docker-analyze/Dockerfile | 2 +- 15/docker-mamonsu/bootstrap_post.sql | 2 +- 15/docker-mamonsu/metrics.ru.md | 164 ++-- 15/docker-pgprobackup/Dockerfile | 2 +- 15/docker-pgprobackup/backup.sh | 35 +- 15/docker-pgprocheckdb/Dockerfile | 2 +- 15/docker-pgprorestore/Dockerfile | 2 +- 15/docker-pgupgrade/Dockerfile | 10 +- 15/docker-postgres/Dockerfile | 8 +- 15/docker-postgres/backup.sh | 34 +- 15/docker-postgres/sql/user_lookup.sql | 18 +- 15/docker_start.sh | 2 +- 15/postgres-pgupgrade.yml | 2 +- 15/postgres-service.yml | 4 +- 15/postgres-service_all.yml | 6 +- 15/postgres-service_pgb.yml | 6 +- 16/analyze-service.yml | 23 + 16/backup-service.yml | 24 + 16/bin/analyze_start.sh | 7 + 16/bin/backup_start.sh | 7 + 16/bin/check_cluster_start.sh | 7 + 16/bin/clear_all_docker.sh | 6 + 16/bin/docker_build.sh | 29 + 16/bin/docker_start.sh | 9 + 16/bin/harbor_push.sh | 43 + 16/bin/hub_push.sh | 41 + 16/bin/postgres_start.sh | 8 + 16/bin/postgres_start_all.sh | 11 + 16/bin/postgres_start_pgb.sh | 11 + 16/bin/restore_start.sh | 7 + 16/bin/show_start.sh | 7 + 16/bin/upgrade_start.sh | 14 + 16/check_cluster_service.yml | 27 + 16/docker-analyze/Dockerfile | 47 + 16/docker-analyze/analyze_log.sh | 100 ++ .../pg_stat_statements_report.sql | 172 ++++ 16/docker-mamonsu/Dockerfile | 74 ++ 16/docker-mamonsu/agent.conf | 211 +++++ 16/docker-mamonsu/bootstrap_post.sql | 1 + 16/docker-mamonsu/mamonsu_right_add.sql | 32 + 16/docker-mamonsu/mamonsu_start.sh | 65 ++ 16/docker-mamonsu/metrics.ru.md | 82 ++ 16/docker-mamonsu/pg_jobs_check.py | 75 ++ 16/docker-mamonsu/pg_partition.py | 126 +++ 16/docker-mamonsu/pg_probackup.py | 326 +++++++ 16/docker-mamonsu/pre.sql | 34 + 16/docker-pgprobackup/Dockerfile | 52 ++ 16/docker-pgprobackup/backup.sh | 
122 +++ 16/docker-pgprobackup/show.sh | 56 ++ 16/docker-pgprobackup/sql/first_db.sql | 1 + 16/docker-pgprocheckdb/Dockerfile | 53 ++ 16/docker-pgprocheckdb/check_cluster.sh | 169 ++++ 16/docker-pgprocheckdb/show.sh | 56 ++ 16/docker-pgprocheckdb/sql/first_db.sql | 1 + 16/docker-pgprorestore/Dockerfile | 47 + 16/docker-pgprorestore/restore.sh | 49 + 16/docker-pgprorestore/show.sh | 56 ++ 16/docker-pgupgrade/Dockerfile | 196 ++++ 16/docker-pgupgrade/pg_hba.conf | 102 +++ 16/docker-pgupgrade/pg_ident.conf | 43 + 16/docker-pgupgrade/postgresql.conf | 861 ++++++++++++++++++ 16/docker-pgupgrade/upgrade.sh | 287 ++++++ 16/docker-postgres/Dockerfile | 160 ++++ 16/docker-postgres/backup.sh | 144 +++ 16/docker-postgres/initdb-extension.sh | 95 ++ 16/docker-postgres/locales.conf | 7 + 16/docker-postgres/pg_hba.conf | 102 +++ 16/docker-postgres/pg_ident.conf | 43 + 16/docker-postgres/postgres | 9 + 16/docker-postgres/postgresql.conf | 861 ++++++++++++++++++ 16/docker-postgres/show.sh | 56 ++ 16/docker-postgres/sql/background_start.sql | 12 + 16/docker-postgres/sql/db_all.sql | 68 ++ 16/docker-postgres/sql/db_notpostgres.sql | 289 ++++++ 16/docker-postgres/sql/db_postgres.sql | 53 ++ 16/docker-postgres/sql/db_target.sql | 14 + 16/docker-postgres/sql/first_db.sql | 1 + .../sql/inf_long_running_requests.sql | 105 +++ .../sql/inf_long_running_requests_13plus.sql | 107 +++ .../sql/init_db_2_dblink_fdw.sql | 59 ++ 16/docker-postgres/sql/init_db_3_cron.sql | 221 +++++ 16/docker-postgres/sql/init_db_4_fts.sql | 234 +++++ 16/docker-postgres/sql/post.sql | 68 ++ 16/docker-postgres/sql/post_warning.sql | 13 + 16/docker-postgres/sql/pre.sql | 129 +++ 16/docker-postgres/sql/replace_char_xml.sql | 5 + 16/docker-postgres/sql/send_email.sql | 95 ++ 16/docker-postgres/sql/user_lookup.sql | 9 + 16/docker-postgres/sql/vw_locks.sql | 26 + 16/docker-postgres/sql/vw_partitions.sql | 43 + 16/docker-postgres/sql/vw_who.sql | 20 + 16/docker-postgres/sql/vw_who_13plus.sql | 21 + 
16/docker-postgres/sql/vw_who_tree.sql | 114 +++ 16/docker-postgres/sql/vw_who_tree_13plus.sql | 117 +++ 16/docker-postgres/sql/vw_who_tree_14plus.sql | 120 +++ 16/docker-postgres/update-extension.sh | 50 + 16/docker_start.sh | 13 + 16/postgres-pgupgrade.yml | 18 + 16/postgres-service.yml | 39 + 16/postgres-service_all.yml | 100 ++ 16/postgres-service_pgb.yml | 67 ++ 16/restore-service.yml | 20 + 16/show_backup-service.yml | 19 + 16/upgrade_start.sh | 8 + 155 files changed, 8169 insertions(+), 546 deletions(-) create mode 100644 16/analyze-service.yml create mode 100644 16/backup-service.yml create mode 100644 16/bin/analyze_start.sh create mode 100644 16/bin/backup_start.sh create mode 100644 16/bin/check_cluster_start.sh create mode 100644 16/bin/clear_all_docker.sh create mode 100644 16/bin/docker_build.sh create mode 100644 16/bin/docker_start.sh create mode 100644 16/bin/harbor_push.sh create mode 100644 16/bin/hub_push.sh create mode 100644 16/bin/postgres_start.sh create mode 100644 16/bin/postgres_start_all.sh create mode 100644 16/bin/postgres_start_pgb.sh create mode 100644 16/bin/restore_start.sh create mode 100644 16/bin/show_start.sh create mode 100644 16/bin/upgrade_start.sh create mode 100644 16/check_cluster_service.yml create mode 100644 16/docker-analyze/Dockerfile create mode 100644 16/docker-analyze/analyze_log.sh create mode 100644 16/docker-analyze/pg_stat_statements_report.sql create mode 100644 16/docker-mamonsu/Dockerfile create mode 100644 16/docker-mamonsu/agent.conf create mode 100644 16/docker-mamonsu/bootstrap_post.sql create mode 100644 16/docker-mamonsu/mamonsu_right_add.sql create mode 100644 16/docker-mamonsu/mamonsu_start.sh create mode 100644 16/docker-mamonsu/metrics.ru.md create mode 100644 16/docker-mamonsu/pg_jobs_check.py create mode 100644 16/docker-mamonsu/pg_partition.py create mode 100644 16/docker-mamonsu/pg_probackup.py create mode 100644 16/docker-mamonsu/pre.sql create mode 100644 16/docker-pgprobackup/Dockerfile 
create mode 100644 16/docker-pgprobackup/backup.sh create mode 100644 16/docker-pgprobackup/show.sh create mode 100644 16/docker-pgprobackup/sql/first_db.sql create mode 100644 16/docker-pgprocheckdb/Dockerfile create mode 100644 16/docker-pgprocheckdb/check_cluster.sh create mode 100644 16/docker-pgprocheckdb/show.sh create mode 100644 16/docker-pgprocheckdb/sql/first_db.sql create mode 100644 16/docker-pgprorestore/Dockerfile create mode 100644 16/docker-pgprorestore/restore.sh create mode 100644 16/docker-pgprorestore/show.sh create mode 100644 16/docker-pgupgrade/Dockerfile create mode 100644 16/docker-pgupgrade/pg_hba.conf create mode 100644 16/docker-pgupgrade/pg_ident.conf create mode 100644 16/docker-pgupgrade/postgresql.conf create mode 100644 16/docker-pgupgrade/upgrade.sh create mode 100644 16/docker-postgres/Dockerfile create mode 100644 16/docker-postgres/backup.sh create mode 100644 16/docker-postgres/initdb-extension.sh create mode 100644 16/docker-postgres/locales.conf create mode 100644 16/docker-postgres/pg_hba.conf create mode 100644 16/docker-postgres/pg_ident.conf create mode 100644 16/docker-postgres/postgres create mode 100644 16/docker-postgres/postgresql.conf create mode 100644 16/docker-postgres/show.sh create mode 100644 16/docker-postgres/sql/background_start.sql create mode 100644 16/docker-postgres/sql/db_all.sql create mode 100644 16/docker-postgres/sql/db_notpostgres.sql create mode 100644 16/docker-postgres/sql/db_postgres.sql create mode 100644 16/docker-postgres/sql/db_target.sql create mode 100644 16/docker-postgres/sql/first_db.sql create mode 100644 16/docker-postgres/sql/inf_long_running_requests.sql create mode 100644 16/docker-postgres/sql/inf_long_running_requests_13plus.sql create mode 100644 16/docker-postgres/sql/init_db_2_dblink_fdw.sql create mode 100644 16/docker-postgres/sql/init_db_3_cron.sql create mode 100644 16/docker-postgres/sql/init_db_4_fts.sql create mode 100644 16/docker-postgres/sql/post.sql create mode 
100644 16/docker-postgres/sql/post_warning.sql create mode 100644 16/docker-postgres/sql/pre.sql create mode 100644 16/docker-postgres/sql/replace_char_xml.sql create mode 100644 16/docker-postgres/sql/send_email.sql create mode 100644 16/docker-postgres/sql/user_lookup.sql create mode 100644 16/docker-postgres/sql/vw_locks.sql create mode 100644 16/docker-postgres/sql/vw_partitions.sql create mode 100644 16/docker-postgres/sql/vw_who.sql create mode 100644 16/docker-postgres/sql/vw_who_13plus.sql create mode 100644 16/docker-postgres/sql/vw_who_tree.sql create mode 100644 16/docker-postgres/sql/vw_who_tree_13plus.sql create mode 100644 16/docker-postgres/sql/vw_who_tree_14plus.sql create mode 100644 16/docker-postgres/update-extension.sh create mode 100644 16/docker_start.sh create mode 100644 16/postgres-pgupgrade.yml create mode 100644 16/postgres-service.yml create mode 100644 16/postgres-service_all.yml create mode 100644 16/postgres-service_pgb.yml create mode 100644 16/restore-service.yml create mode 100644 16/show_backup-service.yml create mode 100644 16/upgrade_start.sh diff --git a/12/backup-service.yml b/12/backup-service.yml index 5e390fe..0f77223 100644 --- a/12/backup-service.yml +++ b/12/backup-service.yml @@ -20,5 +20,5 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" BACKUP_STREAM: "yes" diff --git a/12/bin/clear_all_docker.sh b/12/bin/clear_all_docker.sh index ac2cdc5..9d06db3 100644 --- a/12/bin/clear_all_docker.sh +++ b/12/bin/clear_all_docker.sh @@ -1,4 +1,5 @@ #!/bin/bash +# docker system prune -a # чистка всех образов в каталоге /var/lib/docker/overlay2 docker stop $(docker ps -q) docker rm -v $(docker ps -aq -f status=exited) docker rmi $(docker image ls -q) -f diff --git a/12/bin/docker_start.sh b/12/bin/docker_start.sh index 01c0b9f..123634c 100644 --- a/12/bin/docker_start.sh +++ b/12/bin/docker_start.sh @@ -4,6 +4,6 @@ docker run -p 
127.0.0.1:5433:5432/tcp --shm-size 2147483648 \ -e POSTGRES_HOST_AUTH_METHOD=trust \ -e DEPLOY_PASSWORD=postgres \ -e TZ="Etc/UTC" \ - grufos/postgres:12.17 \ + grufos/postgres:12.18 \ -c shared_preload_libraries="plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm" \ -c shared_ispell.max_size=70MB diff --git a/12/bin/harbor_push.sh b/12/bin/harbor_push.sh index 2d5fff0..c3f1f42 100644 --- a/12/bin/harbor_push.sh +++ b/12/bin/harbor_push.sh @@ -1,7 +1,7 @@ #!/bin/bash VERSION=12 MINOR=17 -VERS_BOUNCER="1.21.0" +VERS_BOUNCER="1.22.0" VERS_PROBACKUP="2.5.13" VERS_MAMONSU="3.5.5" PROJECT=dba_postgres diff --git a/12/bin/hub_push.sh b/12/bin/hub_push.sh index 11f1d1f..7efa173 100644 --- a/12/bin/hub_push.sh +++ b/12/bin/hub_push.sh @@ -1,7 +1,7 @@ #!/bin/bash VERSION=12 MINOR=17 -VERS_BOUNCER="1.21.0" +VERS_BOUNCER="1.22.0" VERS_PROBACKUP="2.5.13" VERS_MAMONSU="3.5.5" ACCOUNT=grufos diff --git a/12/docker-mamonsu/bootstrap_post.sql b/12/docker-mamonsu/bootstrap_post.sql index e868c1f..07558c2 100644 --- a/12/docker-mamonsu/bootstrap_post.sql +++ b/12/docker-mamonsu/bootstrap_post.sql @@ -1 +1 @@ -select 'GRANT EXECUTE ON FUNCTION mamonsu.' || proname || '() TO mamonsu;' from pg_proc where pronamespace = 'mamonsu'::regnamespace \gexec +select 'GRANT EXECUTE ON FUNCTION mamonsu.' || oid::regprocedure || ' TO mamonsu;' from pg_proc where pronamespace = 'mamonsu'::regnamespace \gexec diff --git a/12/docker-mamonsu/metrics.ru.md b/12/docker-mamonsu/metrics.ru.md index 7058720..6dd04b9 100644 --- a/12/docker-mamonsu/metrics.ru.md +++ b/12/docker-mamonsu/metrics.ru.md @@ -1,82 +1,82 @@ -# Описания плагинов - -## pg_probackup.py -Предназначен для контроля за состоянием каталогов бэкапов создаваемых утилитой [pg_probackup](https://postgrespro.ru/docs/postgrespro/current/app-pgprobackup). -Плагин адаптирован для контроля нескольких инстансов в одном каталоге. Имя инстанса указывается в ключе метрики как подкаталог. 
- -### Настройки в секции [pgprobackup] - -| Наименование | Ключ | Описание | -| --------------------------------- | ------------------------- | ------------------------------------------------------------------ | -| enabled | False | По умолчанию плагин отключен. Укажите True для включения | -| interval | 900 | Как часто опрашивать состояние каталогов. Указано в секундах | -| backup_dirs | /backup_dir1,/backup_dir2 | Список каталогов бэкапов утилиты pg_probackup | -| pg_probackup_path | /usr/bin/pg_probackup-13 | Полный путь к утилите создания бэкапов pg_probackup | -| max_time_run_backup2alert_in_sec | 21600 | Время срабатывания алерта "Backup runs too long on..." в секундах. | -| max_time_lack_backup2alert_in_sec | 100800 | Время срабатывания алерта "Long time no backups on..." в секундах. | - - -### Текущие метрики в Discovery правиле: - -| Наименование | Ключ | Хранить | Описание | -| ---------------------------------------------------------- | ------------------------------------------------ | ------- | -------------------------------------------------------- | -| Pg_probackup dir {#BACKUPDIR}: size | pg_probackup.dir.size[{#BACKUPDIR}] | 31d | Общий размер каталога: /backups + /wal | -| Pg_probackup dir {#BACKUPDIR}/backups: size | pg_probackup.dir.size[{#BACKUPDIR}/backups] | 31d | Размер подкаталога /backups | -| Pg_probackup dir {#BACKUPDIR}/wal: size | pg_probackup.dir.size[{#BACKUPDIR}/wal] | 31d | Размер подкаталога /wal | -| Pg_probackup dir {#BACKUPDIR}: duration full backup | pg_probackup.dir.duration_full[{#BACKUPDIR}] | 31d | Длительность в секундах создания полного бэкапа | -| Pg_probackup dir {#BACKUPDIR}: duration incremental backup | pg_probackup.dir.duration_inc[{#BACKUPDIR}] | 31d | Длительность в секундах создания инкрементального бэкапа | -| Pg_probackup dir {#BACKUPDIR}: start time backup | pg_probackup.dir.start_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) старта создания бэкапа | -| Pg_probackup dir {#BACKUPDIR}: end time backup | 
pg_probackup.dir.end_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) завершения создания бэкапа | -| Pg_probackup dir {#BACKUPDIR}: mode | pg_probackup.dir.mode_backup[{#BACKUPDIR}] | | Текущий режим бэкапа | -| Pg_probackup dir {#BACKUPDIR}: status | pg_probackup.dir.status_backup[{#BACKUPDIR}] | | Текущий статус бэкапа | -| Pg_probackup dir {#BACKUPDIR}: error | pg_probackup.dir.error[{#BACKUPDIR}] | | Признак ошибочного состояния или "ok" если всё хорошо | - - -### Текущие алерты в Discovery правиле: -Созданы следующие алерты, позволящие контролировать состояние архивных каталогов: - -* Алерт срабатывает если создание бэкапа выполняется больше, чем указано в настроечном параметре `max_time_run_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 21600 (6 часов). Контролируется текущее состояние в котором находится процесс создания бэкапной копии. - -| Категория | Детали | -| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Важность: | Warning | -| Наименование: | Backup runs too long on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} (RUNNING) | -| Выражение: | {PostgresPro-Linux:pg_probackup.dir.status_backup[{#BACKUPDIR}].last()}="RUNNING" and ( {PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].now()}-{PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].last()}) > max_time_run_backup2alert_in_sec | - -* Алерт срабатывает если не выполняется создание нового бэкапа дольше, чем указано в настроечном параметре `max_time_lack_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 100800 (28 часов). Контролируется, что очередной бэкап (тип бэкапа любой) будет создан не позже, чем указано в параметре. 
- -| Категория | Детали | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Важность: | Warning | -| Наименование: | Long time no backups on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} | -| Выражение: | ( {PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].now()} -{PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].last()}) > max_time_lack_backup2alert_in_sec | - -* Алерт срабатывает если при создании бэкапа произошла ошибка - 'ERROR', 'CORRUPT', 'ORPHAN'. Контролирует состояние любой архивной копии, не только последней. Активен всё время пока есть любая архивная копия с ошибочным состоянием. - -| Категория | Детали | -| ------------- | ----------------------------------------------------------------------------------- | -| Важность: | Average | -| Наименование: | Error in pg_probackup dir {#BACKUPDIR} (hostname={HOSTNAME} value={ITEM.LASTVALUE}) | -| Выражение: | {PostgresPro-Linux:pg_probackup.dir.error[{#BACKUPDIR}].str(ok)}<>1 | - - -### Текущие графики в Discovery правиле: - -1. Pg_probackup: backup dir: {#BACKUPDIR} size - -Показывает 3 метрики с информацией о размерах каталогов с архивными копиями: - -| Метрика | Сторона графика | Описание | -| ------------------------------------------- | --------------- | -------------------------------------- | -| pg_probackup.dir.size[{#BACKUPDIR}] | (Left Side) | Общий размер каталогов /backups + /wal | -| pg_probackup.dir.size[{#BACKUPDIR}/backups] | (Left Side) | размер подкаталога /backups | -| pg_probackup.dir.size[{#BACKUPDIR}/wal] | (Right Side) | размер подкаталога /wal | - -2. 
Pg_probackup: backup dir: {#BACKUPDIR} duration - -Показывает 2 метрики с длительностью создания архивных копий: - -| Метрика | Сторона графика | Описание | -| -------------------------------------------- | --------------- | -------------------------------------------------------- | -| pg_probackup.dir.duration_full[{#BACKUPDIR}] | (Left Side) | Длительность в секундах создания полного бэкапа | -| pg_probackup.dir.duration_inc[{#BACKUPDIR}] | (Right Side) | Длительность в секундах создания инкрементального бэкапа | +# Описания плагинов + +## pg_probackup.py +Предназначен для контроля за состоянием каталогов бэкапов создаваемых утилитой [pg_probackup](https://postgrespro.ru/docs/postgrespro/current/app-pgprobackup). +Плагин адаптирован для контроля нескольких инстансов в одном каталоге. Имя инстанса указывается в ключе метрики как подкаталог. + +### Настройки в секции [pgprobackup] + +| Наименование | Ключ | Описание | +| --------------------------------- | ------------------------- | ------------------------------------------------------------------ | +| enabled | False | По умолчанию плагин отключен. Укажите True для включения | +| interval | 900 | Как часто опрашивать состояние каталогов. Указано в секундах | +| backup_dirs | /backup_dir1,/backup_dir2 | Список каталогов бэкапов утилиты pg_probackup | +| pg_probackup_path | /usr/bin/pg_probackup-13 | Полный путь к утилите создания бэкапов pg_probackup | +| max_time_run_backup2alert_in_sec | 21600 | Время срабатывания алерта "Backup runs too long on..." в секундах. | +| max_time_lack_backup2alert_in_sec | 100800 | Время срабатывания алерта "Long time no backups on..." в секундах. 
| + + +### Текущие метрики в Discovery правиле: + +| Наименование | Ключ | Хранить | Описание | +| ---------------------------------------------------------- | ------------------------------------------------ | ------- | -------------------------------------------------------- | +| Pg_probackup dir {#BACKUPDIR}: size | pg_probackup.dir.size[{#BACKUPDIR}] | 31d | Общий размер каталога: /backups + /wal | +| Pg_probackup dir {#BACKUPDIR}/backups: size | pg_probackup.dir.size[{#BACKUPDIR}/backups] | 31d | Размер подкаталога /backups | +| Pg_probackup dir {#BACKUPDIR}/wal: size | pg_probackup.dir.size[{#BACKUPDIR}/wal] | 31d | Размер подкаталога /wal | +| Pg_probackup dir {#BACKUPDIR}: duration full backup | pg_probackup.dir.duration_full[{#BACKUPDIR}] | 31d | Длительность в секундах создания полного бэкапа | +| Pg_probackup dir {#BACKUPDIR}: duration incremental backup | pg_probackup.dir.duration_inc[{#BACKUPDIR}] | 31d | Длительность в секундах создания инкрементального бэкапа | +| Pg_probackup dir {#BACKUPDIR}: start time backup | pg_probackup.dir.start_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) старта создания бэкапа | +| Pg_probackup dir {#BACKUPDIR}: end time backup | pg_probackup.dir.end_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) завершения создания бэкапа | +| Pg_probackup dir {#BACKUPDIR}: mode | pg_probackup.dir.mode_backup[{#BACKUPDIR}] | | Текущий режим бэкапа | +| Pg_probackup dir {#BACKUPDIR}: status | pg_probackup.dir.status_backup[{#BACKUPDIR}] | | Текущий статус бэкапа | +| Pg_probackup dir {#BACKUPDIR}: error | pg_probackup.dir.error[{#BACKUPDIR}] | | Признак ошибочного состояния или "ok" если всё хорошо | + + +### Текущие алерты в Discovery правиле: +Созданы следующие алерты, позволящие контролировать состояние архивных каталогов: + +* Алерт срабатывает если создание бэкапа выполняется больше, чем указано в настроечном параметре `max_time_run_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 21600 (6 часов). 
Контролируется текущее состояние в котором находится процесс создания бэкапной копии. + +| Категория | Детали | +| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Важность: | Warning | +| Наименование: | Backup runs too long on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} (RUNNING) | +| Выражение: | {PostgresPro-Linux:pg_probackup.dir.status_backup[{#BACKUPDIR}].last()}="RUNNING" and ( {PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].now()}-{PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].last()}) > max_time_run_backup2alert_in_sec | + +* Алерт срабатывает если не выполняется создание нового бэкапа дольше, чем указано в настроечном параметре `max_time_lack_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 100800 (28 часов). Контролируется, что очередной бэкап (тип бэкапа любой) будет создан не позже, чем указано в параметре. + +| Категория | Детали | +| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Важность: | Warning | +| Наименование: | Long time no backups on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} | +| Выражение: | ( {PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].now()} -{PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].last()}) > max_time_lack_backup2alert_in_sec | + +* Алерт срабатывает если при создании бэкапа произошла ошибка - 'ERROR', 'CORRUPT', 'ORPHAN'. Контролирует состояние любой архивной копии, не только последней. Активен всё время пока есть любая архивная копия с ошибочным состоянием. 
+ +| Категория | Детали | +| ------------- | ----------------------------------------------------------------------------------- | +| Важность: | Average | +| Наименование: | Error in pg_probackup dir {#BACKUPDIR} (hostname={HOSTNAME} value={ITEM.LASTVALUE}) | +| Выражение: | {PostgresPro-Linux:pg_probackup.dir.error[{#BACKUPDIR}].str(ok)}<>1 | + + +### Текущие графики в Discovery правиле: + +1. Pg_probackup: backup dir: {#BACKUPDIR} size + +Показывает 3 метрики с информацией о размерах каталогов с архивными копиями: + +| Метрика | Сторона графика | Описание | +| ------------------------------------------- | --------------- | -------------------------------------- | +| pg_probackup.dir.size[{#BACKUPDIR}] | (Left Side) | Общий размер каталогов /backups + /wal | +| pg_probackup.dir.size[{#BACKUPDIR}/backups] | (Left Side) | размер подкаталога /backups | +| pg_probackup.dir.size[{#BACKUPDIR}/wal] | (Right Side) | размер подкаталога /wal | + +2. Pg_probackup: backup dir: {#BACKUPDIR} duration + +Показывает 2 метрики с длительностью создания архивных копий: + +| Метрика | Сторона графика | Описание | +| -------------------------------------------- | --------------- | -------------------------------------------------------- | +| pg_probackup.dir.duration_full[{#BACKUPDIR}] | (Left Side) | Длительность в секундах создания полного бэкапа | +| pg_probackup.dir.duration_inc[{#BACKUPDIR}] | (Right Side) | Длительность в секундах создания инкрементального бэкапа | diff --git a/12/docker-pgprobackup/backup.sh b/12/docker-pgprobackup/backup.sh index 6fbe28d..5f7cb6a 100644 --- a/12/docker-pgprobackup/backup.sh +++ b/12/docker-pgprobackup/backup.sh @@ -25,13 +25,16 @@ if [ "$BACKUP_THREADS" = "" ]; then BACKUP_THREADS=4 fi -if [ "$BACKUP_MODE" = "" ]; then - BACKUP_MODE=page -fi - if [ "$DOW" = "6" ] ; then # make a full backup once a week (Saturday) - BACKUP_MODE=full + BACKUPMODE=full +else + # make an incremental backup on other days of the week + BACKUPMODE=page +fi +if [ 
"$BACKUP_MODE" != "" ]; then + # The backup creation mode is given forcibly + BACKUPMODE=$BACKUP_MODE fi if [ "$BACKUP_STREAM" = "" ]; then @@ -70,21 +73,33 @@ if ! [ -f $PGDATA/archive_active.trigger ] ; then touch $PGDATA/archive_active.trigger fi -if [[ "$IS_FULL" = "" || $BACKUP_MODE = "full" ]] ; then +if [[ "$IS_FULL" = "" || $BACKUPMODE = "full" ]] ; then echo "The initial backup must be type FULL ..." - /usr/bin/pg_probackup-$PG_MAJOR backup -d postgres --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + /usr/bin/pg_probackup-$PG_MAJOR backup -d postgres --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS else - # Backup type depends on day or input parameter - /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUP_MODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + if [[ $BACKUPMODE = "merge" ]]; then + # в этом режиме здесь всегда PAGE + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b page $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS + else + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUPMODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS + fi STATUS=`/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --format=json | jq -c '.[].backups[0].status'` LAST_STATE=${STATUS//'"'/''} if [[ "$LAST_STATE" = "CORRUPT" || "$LAST_STATE" = "ERROR" || "$LAST_STATE" = "ORPHAN" ]] ; then # You need to run a full backup, as an error occurred with incremental # Perhaps the loss of the segment at Failover ... 
- /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS fi fi +if [[ $BACKUPMODE = "merge" ]] ; then + # объединяем старые бэкапы в соответствии с настройками + /usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --merge-expired --no-validate --threads=$BACKUP_THREADS +else + # чистим старые бэкапы в соответствии с настройками + /usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --threads=$BACKUP_THREADS +fi + # collecting statistics on backups /usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt /usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --archive >> ~postgres/backups.txt diff --git a/12/docker-postgres/Dockerfile b/12/docker-postgres/Dockerfile index 695f32d..2bee462 100644 --- a/12/docker-postgres/Dockerfile +++ b/12/docker-postgres/Dockerfile @@ -4,7 +4,7 @@ # https://hub.docker.com/r/postgis/postgis # https://github.com/postgis/docker-postgis # -FROM postgres:12.17 +FROM postgres:12.18 LABEL maintainer="Sergey Grinko " @@ -16,7 +16,7 @@ ENV BACKUP_PATH /mnt/pgbak ENV POSTGRES_INITDB_ARGS "--locale=ru_RU.UTF8 --data-checksums" ENV RUM_VERSION 1.3.13 -RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 \ +RUN echo ru_RU.UTF-8 UTF-8 >> /etc/locale.gen; locale-gen \ && apt-get update \ && apt-get install -y --no-install-recommends ca-certificates jq wget freetds-dev freetds-common git make gcc postgresql-server-dev-$PG_MAJOR libicu-dev sendemail htop mc systemtap-sdt-dev vim \ # подключаем репозитарий архивной утилиты @@ -64,7 +64,7 @@ RUN localedef -i ru_RU -c -f UTF-8 -A 
/usr/share/locale/locale.alias ru_RU.UTF-8 && make USE_PGXS=1 install \ # ====== pg_variables && cd /tmp/build_ext \ - && git clone https://github.com/postgrespro/pg_variables \ + && git clone https://github.com/xinferum/pg_variables \ && cd pg_variables \ && make USE_PGXS=1 \ && make USE_PGXS=1 install \ @@ -102,7 +102,7 @@ RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 && cd / \ && ln -s /usr/share/postgresql/$PG_MAJOR/tsearch_data /usr/share/postgresql/ \ # ====== clean all unused package... - && apt-get purge -y git* mariadb* make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ && apt-get -f install \ && apt-get -y autoremove \ && apt-get -y clean \ diff --git a/12/docker-postgres/backup.sh b/12/docker-postgres/backup.sh index 5d4d510..f82006f 100644 --- a/12/docker-postgres/backup.sh +++ b/12/docker-postgres/backup.sh @@ -30,10 +30,6 @@ if [ "$BACKUP_THREADS" = "" ]; then BACKUP_THREADS=4 fi -if [ "$BACKUP_MODE" = "" ]; then - BACKUP_MODE=page -fi - if [ "$BACKUP_STREAM" = "" ]; then BACKUP_STREAM="stream" fi @@ -51,15 +47,14 @@ fi if [ "$DOW" = "6" ] ; then # make a full backup once a week (Saturday) - BACKUP_MODE=full + BACKUPMODE=full else # make an incremental backup on other days of the week - BACKUP_MODE=page + BACKUPMODE=page fi - -if [ "$1" != "" ]; then +if [ "$BACKUP_MODE" != "" ]; then # The backup creation mode is given forcibly - BACKUP_MODE=$1 + BACKUPMODE=$BACKUP_MODE fi BACKUP_STREAM="--stream" @@ -75,7 +70,6 @@ if [ "$3" != "" ]; then BACKUP_THREADS=$3 fi - cd $BACKUP_PATH COUNT_DIR=`ls -l $BACKUP_PATH | grep "^d" | wc -l` @@ -98,21 +92,33 @@ if ! 
[ -f $PGDATA/archive_active.trigger ] ; then su - postgres -c "touch $PGDATA/archive_active.trigger" fi -if [[ "$IS_FULL" = "" || $BACKUP_MODE = "full" ]] ; then +if [[ "$IS_FULL" = "" || $BACKUPMODE = "full" ]] ; then # Full backup needs to be forcibly - su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" else # Backup type depends on day or input parameter - su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUP_MODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + if [[ $BACKUPMODE = "merge" ]]; then + # в этом режиме здесь всегда PAGE + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b page $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" + else + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUPMODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" + fi STATUS=`su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --format=json | jq -c '.[].backups[0].status'"` LAST_STATE=${STATUS//'"'/''} if [[ "$LAST_STATE" = "CORRUPT" || "$LAST_STATE" = "ERROR" || "$LAST_STATE" = "ORPHAN" ]] ; then # You need to run a full backup, as an error occurred with incremental # Perhaps the loss of the segment at Failover ...
- su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" fi fi +if [[ $BACKUPMODE = "merge" ]] ; then + # объединяем старые бэкапы в соответствии с настройками + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --merge-expired --no-validate --threads=$BACKUP_THREADS" +else + # чистим старые бэкапы в соответствии с настройками + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --threads=$BACKUP_THREADS" +fi # collecting statistics on backups su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt" diff --git a/12/docker-postgres/sql/user_lookup.sql b/12/docker-postgres/sql/user_lookup.sql index 80e1f47..43e1a1f 100644 --- a/12/docker-postgres/sql/user_lookup.sql +++ b/12/docker-postgres/sql/user_lookup.sql @@ -1,9 +1,9 @@ -CREATE OR REPLACE FUNCTION pgbouncer.user_lookup(p_username text, OUT uname text, OUT phash text) RETURNS record - LANGUAGE plpgsql SECURITY DEFINER - AS $$ -BEGIN - SELECT usename, passwd FROM pg_catalog.pg_shadow - WHERE usename = p_username INTO uname, phash; - RETURN; -END; -$$; +CREATE OR REPLACE FUNCTION pgbouncer.user_lookup(p_username text, OUT uname text, OUT phash text) RETURNS record + LANGUAGE plpgsql SECURITY DEFINER + AS $$ +BEGIN + SELECT usename, passwd FROM pg_catalog.pg_shadow + WHERE usename = p_username INTO uname, phash; + RETURN; +END; +$$; diff --git a/12/docker_start.sh b/12/docker_start.sh index 12819e7..f497e05 100644 --- a/12/docker_start.sh +++ b/12/docker_start.sh @@ -9,5 +9,5 @@ docker run --rm --name 
my_postgres_12 --shm-size 2147483648 -p 5433:5432/tcp --s -v /mnt/pgbak2/:/mnt/pgbak \ -v /usr/share/postgres/12_1/tsearch_data:/usr/share/postgresql/tsearch_data \ -e POSTGRES_PASSWORD=postgres -e POSTGRES_HOST_AUTH_METHOD=trust -e DEPLOY_PASSWORD=postgres -e PGBOUNCER_PASSWORD=postgres -e TZ="Etc/UTC" \ - grufos/postgres:12.17 \ + grufos/postgres:12.18 \ -c shared_preload_libraries="plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm" -c shared_ispell.max_size=70MB diff --git a/12/postgres-service.yml b/12/postgres-service.yml index 4501ffa..232771b 100644 --- a/12/postgres-service.yml +++ b/12/postgres-service.yml @@ -3,7 +3,7 @@ services: postgres: -# image: grufos/postgres:12.17 +# image: grufos/postgres:12.18 build: context: ./docker-postgres dockerfile: Dockerfile @@ -35,5 +35,5 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" diff --git a/12/postgres-service_all.yml b/12/postgres-service_all.yml index 70308b9..dd4d530 100644 --- a/12/postgres-service_all.yml +++ b/12/postgres-service_all.yml @@ -3,7 +3,7 @@ services: postgres: -# image: grufos/postgres:12.17 +# image: grufos/postgres:12.18 build: context: ./docker-postgres dockerfile: Dockerfile @@ -35,10 +35,10 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" pgbouncer: -# image: grufos/pgbouncer:1.17.0 +# image: grufos/pgbouncer:1.22.0 build: context: ./docker-pgbouncer dockerfile: Dockerfile diff --git a/12/postgres-service_pgb.yml b/12/postgres-service_pgb.yml index 528a91d..0710b33 100644 --- a/12/postgres-service_pgb.yml +++ b/12/postgres-service_pgb.yml @@ -3,7 +3,7 @@ services: postgres: -# image: grufos/postgres:12.17 +# image: grufos/postgres:12.18 build: context: ./docker-postgres dockerfile: Dockerfile @@ -35,10 +35,10 @@ services: EMAIL_SERVER: 
"mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" pgbouncer: -# image: grufos/pgbouncer:1.17.0 +# image: grufos/pgbouncer:1.22.0 build: context: ./docker-pgbouncer dockerfile: Dockerfile diff --git a/13/backup-service.yml b/13/backup-service.yml index f04c69e..16754c4 100644 --- a/13/backup-service.yml +++ b/13/backup-service.yml @@ -20,5 +20,5 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" BACKUP_STREAM: "yes" diff --git a/13/bin/clear_all_docker.sh b/13/bin/clear_all_docker.sh index ac2cdc5..9d06db3 100644 --- a/13/bin/clear_all_docker.sh +++ b/13/bin/clear_all_docker.sh @@ -1,4 +1,5 @@ #!/bin/bash +# docker system prune -a # чистка всех образов в каталоге /var/lib/docker/overlay2 docker stop $(docker ps -q) docker rm -v $(docker ps -aq -f status=exited) docker rmi $(docker image ls -q) -f diff --git a/13/bin/docker_start.sh b/13/bin/docker_start.sh index 9e84238..84c39fe 100644 --- a/13/bin/docker_start.sh +++ b/13/bin/docker_start.sh @@ -4,6 +4,6 @@ docker run -p 127.0.0.1:5433:5432/tcp --shm-size 2147483648 \ -e POSTGRES_HOST_AUTH_METHOD=trust \ -e DEPLOY_PASSWORD=postgres \ -e TZ="Etc/UTC" \ - grufos/postgres:13.13 \ + grufos/postgres:13.14 \ -c shared_preload_libraries="plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm" \ -c shared_ispell.max_size=70MB diff --git a/13/bin/harbor_push.sh b/13/bin/harbor_push.sh index 61d8858..412a46d 100644 --- a/13/bin/harbor_push.sh +++ b/13/bin/harbor_push.sh @@ -1,7 +1,7 @@ #!/bin/bash VERSION=13 MINOR=13 -VERS_BOUNCER="1.21.0" +VERS_BOUNCER="1.22.0" VERS_PROBACKUP="2.5.13" VERS_MAMONSU="3.5.5" PROJECT=dba_postgres diff --git a/13/bin/hub_push.sh b/13/bin/hub_push.sh index 9acc566..66b6a1a 100644 --- a/13/bin/hub_push.sh +++ b/13/bin/hub_push.sh @@ -1,7 +1,7 @@ #!/bin/bash VERSION=13 MINOR=13 
-VERS_BOUNCER="1.21.0" +VERS_BOUNCER="1.22.0" VERS_PROBACKUP="2.5.13" VERS_MAMONSU="3.5.5" ACCOUNT=grufos diff --git a/13/docker-mamonsu/bootstrap_post.sql b/13/docker-mamonsu/bootstrap_post.sql index e868c1f..07558c2 100644 --- a/13/docker-mamonsu/bootstrap_post.sql +++ b/13/docker-mamonsu/bootstrap_post.sql @@ -1 +1 @@ -select 'GRANT EXECUTE ON FUNCTION mamonsu.' || proname || '() TO mamonsu;' from pg_proc where pronamespace = 'mamonsu'::regnamespace \gexec +select 'GRANT EXECUTE ON FUNCTION mamonsu.' || oid::regprocedure || ' TO mamonsu;' from pg_proc where pronamespace = 'mamonsu'::regnamespace \gexec diff --git a/13/docker-mamonsu/metrics.ru.md b/13/docker-mamonsu/metrics.ru.md index 7058720..6dd04b9 100644 --- a/13/docker-mamonsu/metrics.ru.md +++ b/13/docker-mamonsu/metrics.ru.md @@ -1,82 +1,82 @@ -# Описания плагинов - -## pg_probackup.py -Предназначен для контроля за состоянием каталогов бэкапов создаваемых утилитой [pg_probackup](https://postgrespro.ru/docs/postgrespro/current/app-pgprobackup). -Плагин адаптирован для контроля нескольких инстансов в одном каталоге. Имя инстанса указывается в ключе метрики как подкаталог. - -### Настройки в секции [pgprobackup] - -| Наименование | Ключ | Описание | -| --------------------------------- | ------------------------- | ------------------------------------------------------------------ | -| enabled | False | По умолчанию плагин отключен. Укажите True для включения | -| interval | 900 | Как часто опрашивать состояние каталогов. Указано в секундах | -| backup_dirs | /backup_dir1,/backup_dir2 | Список каталогов бэкапов утилиты pg_probackup | -| pg_probackup_path | /usr/bin/pg_probackup-13 | Полный путь к утилите создания бэкапов pg_probackup | -| max_time_run_backup2alert_in_sec | 21600 | Время срабатывания алерта "Backup runs too long on..." в секундах. | -| max_time_lack_backup2alert_in_sec | 100800 | Время срабатывания алерта "Long time no backups on..." в секундах. 
| - - -### Текущие метрики в Discovery правиле: - -| Наименование | Ключ | Хранить | Описание | -| ---------------------------------------------------------- | ------------------------------------------------ | ------- | -------------------------------------------------------- | -| Pg_probackup dir {#BACKUPDIR}: size | pg_probackup.dir.size[{#BACKUPDIR}] | 31d | Общий размер каталога: /backups + /wal | -| Pg_probackup dir {#BACKUPDIR}/backups: size | pg_probackup.dir.size[{#BACKUPDIR}/backups] | 31d | Размер подкаталога /backups | -| Pg_probackup dir {#BACKUPDIR}/wal: size | pg_probackup.dir.size[{#BACKUPDIR}/wal] | 31d | Размер подкаталога /wal | -| Pg_probackup dir {#BACKUPDIR}: duration full backup | pg_probackup.dir.duration_full[{#BACKUPDIR}] | 31d | Длительность в секундах создания полного бэкапа | -| Pg_probackup dir {#BACKUPDIR}: duration incremental backup | pg_probackup.dir.duration_inc[{#BACKUPDIR}] | 31d | Длительность в секундах создания инкрементального бэкапа | -| Pg_probackup dir {#BACKUPDIR}: start time backup | pg_probackup.dir.start_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) старта создания бэкапа | -| Pg_probackup dir {#BACKUPDIR}: end time backup | pg_probackup.dir.end_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) завершения создания бэкапа | -| Pg_probackup dir {#BACKUPDIR}: mode | pg_probackup.dir.mode_backup[{#BACKUPDIR}] | | Текущий режим бэкапа | -| Pg_probackup dir {#BACKUPDIR}: status | pg_probackup.dir.status_backup[{#BACKUPDIR}] | | Текущий статус бэкапа | -| Pg_probackup dir {#BACKUPDIR}: error | pg_probackup.dir.error[{#BACKUPDIR}] | | Признак ошибочного состояния или "ok" если всё хорошо | - - -### Текущие алерты в Discovery правиле: -Созданы следующие алерты, позволящие контролировать состояние архивных каталогов: - -* Алерт срабатывает если создание бэкапа выполняется больше, чем указано в настроечном параметре `max_time_run_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 21600 (6 часов). 
Контролируется текущее состояние в котором находится процесс создания бэкапной копии. - -| Категория | Детали | -| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Важность: | Warning | -| Наименование: | Backup runs too long on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} (RUNNING) | -| Выражение: | {PostgresPro-Linux:pg_probackup.dir.status_backup[{#BACKUPDIR}].last()}="RUNNING" and ( {PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].now()}-{PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].last()}) > max_time_run_backup2alert_in_sec | - -* Алерт срабатывает если не выполняется создание нового бэкапа дольше, чем указано в настроечном параметре `max_time_lack_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 100800 (28 часов). Контролируется, что очередной бэкап (тип бэкапа любой) будет создан не позже, чем указано в параметре. - -| Категория | Детали | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Важность: | Warning | -| Наименование: | Long time no backups on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} | -| Выражение: | ( {PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].now()} -{PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].last()}) > max_time_lack_backup2alert_in_sec | - -* Алерт срабатывает если при создании бэкапа произошла ошибка - 'ERROR', 'CORRUPT', 'ORPHAN'. Контролирует состояние любой архивной копии, не только последней. Активен всё время пока есть любая архивная копия с ошибочным состоянием. 
- -| Категория | Детали | -| ------------- | ----------------------------------------------------------------------------------- | -| Важность: | Average | -| Наименование: | Error in pg_probackup dir {#BACKUPDIR} (hostname={HOSTNAME} value={ITEM.LASTVALUE}) | -| Выражение: | {PostgresPro-Linux:pg_probackup.dir.error[{#BACKUPDIR}].str(ok)}<>1 | - - -### Текущие графики в Discovery правиле: - -1. Pg_probackup: backup dir: {#BACKUPDIR} size - -Показывает 3 метрики с информацией о размерах каталогов с архивными копиями: - -| Метрика | Сторона графика | Описание | -| ------------------------------------------- | --------------- | -------------------------------------- | -| pg_probackup.dir.size[{#BACKUPDIR}] | (Left Side) | Общий размер каталогов /backups + /wal | -| pg_probackup.dir.size[{#BACKUPDIR}/backups] | (Left Side) | размер подкаталога /backups | -| pg_probackup.dir.size[{#BACKUPDIR}/wal] | (Right Side) | размер подкаталога /wal | - -2. Pg_probackup: backup dir: {#BACKUPDIR} duration - -Показывает 2 метрики с длительностью создания архивных копий: - -| Метрика | Сторона графика | Описание | -| -------------------------------------------- | --------------- | -------------------------------------------------------- | -| pg_probackup.dir.duration_full[{#BACKUPDIR}] | (Left Side) | Длительность в секундах создания полного бэкапа | -| pg_probackup.dir.duration_inc[{#BACKUPDIR}] | (Right Side) | Длительность в секундах создания инкрементального бэкапа | +# Описания плагинов + +## pg_probackup.py +Предназначен для контроля за состоянием каталогов бэкапов создаваемых утилитой [pg_probackup](https://postgrespro.ru/docs/postgrespro/current/app-pgprobackup). +Плагин адаптирован для контроля нескольких инстансов в одном каталоге. Имя инстанса указывается в ключе метрики как подкаталог. 
+ +### Настройки в секции [pgprobackup] + +| Наименование | Ключ | Описание | +| --------------------------------- | ------------------------- | ------------------------------------------------------------------ | +| enabled | False | По умолчанию плагин отключен. Укажите True для включения | +| interval | 900 | Как часто опрашивать состояние каталогов. Указано в секундах | +| backup_dirs | /backup_dir1,/backup_dir2 | Список каталогов бэкапов утилиты pg_probackup | +| pg_probackup_path | /usr/bin/pg_probackup-13 | Полный путь к утилите создания бэкапов pg_probackup | +| max_time_run_backup2alert_in_sec | 21600 | Время срабатывания алерта "Backup runs too long on..." в секундах. | +| max_time_lack_backup2alert_in_sec | 100800 | Время срабатывания алерта "Long time no backups on..." в секундах. | + + +### Текущие метрики в Discovery правиле: + +| Наименование | Ключ | Хранить | Описание | +| ---------------------------------------------------------- | ------------------------------------------------ | ------- | -------------------------------------------------------- | +| Pg_probackup dir {#BACKUPDIR}: size | pg_probackup.dir.size[{#BACKUPDIR}] | 31d | Общий размер каталога: /backups + /wal | +| Pg_probackup dir {#BACKUPDIR}/backups: size | pg_probackup.dir.size[{#BACKUPDIR}/backups] | 31d | Размер подкаталога /backups | +| Pg_probackup dir {#BACKUPDIR}/wal: size | pg_probackup.dir.size[{#BACKUPDIR}/wal] | 31d | Размер подкаталога /wal | +| Pg_probackup dir {#BACKUPDIR}: duration full backup | pg_probackup.dir.duration_full[{#BACKUPDIR}] | 31d | Длительность в секундах создания полного бэкапа | +| Pg_probackup dir {#BACKUPDIR}: duration incremental backup | pg_probackup.dir.duration_inc[{#BACKUPDIR}] | 31d | Длительность в секундах создания инкрементального бэкапа | +| Pg_probackup dir {#BACKUPDIR}: start time backup | pg_probackup.dir.start_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) старта создания бэкапа | +| Pg_probackup dir {#BACKUPDIR}: end time backup | 
pg_probackup.dir.end_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) завершения создания бэкапа | +| Pg_probackup dir {#BACKUPDIR}: mode | pg_probackup.dir.mode_backup[{#BACKUPDIR}] | | Текущий режим бэкапа | +| Pg_probackup dir {#BACKUPDIR}: status | pg_probackup.dir.status_backup[{#BACKUPDIR}] | | Текущий статус бэкапа | +| Pg_probackup dir {#BACKUPDIR}: error | pg_probackup.dir.error[{#BACKUPDIR}] | | Признак ошибочного состояния или "ok" если всё хорошо | + + +### Текущие алерты в Discovery правиле: +Созданы следующие алерты, позволящие контролировать состояние архивных каталогов: + +* Алерт срабатывает если создание бэкапа выполняется больше, чем указано в настроечном параметре `max_time_run_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 21600 (6 часов). Контролируется текущее состояние в котором находится процесс создания бэкапной копии. + +| Категория | Детали | +| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Важность: | Warning | +| Наименование: | Backup runs too long on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} (RUNNING) | +| Выражение: | {PostgresPro-Linux:pg_probackup.dir.status_backup[{#BACKUPDIR}].last()}="RUNNING" and ( {PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].now()}-{PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].last()}) > max_time_run_backup2alert_in_sec | + +* Алерт срабатывает если не выполняется создание нового бэкапа дольше, чем указано в настроечном параметре `max_time_lack_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 100800 (28 часов). Контролируется, что очередной бэкап (тип бэкапа любой) будет создан не позже, чем указано в параметре. 
+ +| Категория | Детали | +| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Важность: | Warning | +| Наименование: | Long time no backups on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} | +| Выражение: | ( {PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].now()} -{PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].last()}) > max_time_lack_backup2alert_in_sec | + +* Алерт срабатывает если при создании бэкапа произошла ошибка - 'ERROR', 'CORRUPT', 'ORPHAN'. Контролирует состояние любой архивной копии, не только последней. Активен всё время пока есть любая архивная копия с ошибочным состоянием. + +| Категория | Детали | +| ------------- | ----------------------------------------------------------------------------------- | +| Важность: | Average | +| Наименование: | Error in pg_probackup dir {#BACKUPDIR} (hostname={HOSTNAME} value={ITEM.LASTVALUE}) | +| Выражение: | {PostgresPro-Linux:pg_probackup.dir.error[{#BACKUPDIR}].str(ok)}<>1 | + + +### Текущие графики в Discovery правиле: + +1. Pg_probackup: backup dir: {#BACKUPDIR} size + +Показывает 3 метрики с информацией о размерах каталогов с архивными копиями: + +| Метрика | Сторона графика | Описание | +| ------------------------------------------- | --------------- | -------------------------------------- | +| pg_probackup.dir.size[{#BACKUPDIR}] | (Left Side) | Общий размер каталогов /backups + /wal | +| pg_probackup.dir.size[{#BACKUPDIR}/backups] | (Left Side) | размер подкаталога /backups | +| pg_probackup.dir.size[{#BACKUPDIR}/wal] | (Right Side) | размер подкаталога /wal | + +2. 
Pg_probackup: backup dir: {#BACKUPDIR} duration + +Показывает 2 метрики с длительностью создания архивных копий: + +| Метрика | Сторона графика | Описание | +| -------------------------------------------- | --------------- | -------------------------------------------------------- | +| pg_probackup.dir.duration_full[{#BACKUPDIR}] | (Left Side) | Длительность в секундах создания полного бэкапа | +| pg_probackup.dir.duration_inc[{#BACKUPDIR}] | (Right Side) | Длительность в секундах создания инкрементального бэкапа | diff --git a/13/docker-pgprobackup/backup.sh b/13/docker-pgprobackup/backup.sh index 6fbe28d..5f7cb6a 100644 --- a/13/docker-pgprobackup/backup.sh +++ b/13/docker-pgprobackup/backup.sh @@ -25,13 +25,16 @@ if [ "$BACKUP_THREADS" = "" ]; then BACKUP_THREADS=4 fi -if [ "$BACKUP_MODE" = "" ]; then - BACKUP_MODE=page -fi - if [ "$DOW" = "6" ] ; then # make a full backup once a week (Saturday) - BACKUP_MODE=full + BACKUPMODE=full +else + # make an incremental backup on other days of the week + BACKUPMODE=page +fi +if [ "$BACKUP_MODE" != "" ]; then + # The backup creation mode is given forcibly + BACKUPMODE=$BACKUP_MODE fi if [ "$BACKUP_STREAM" = "" ]; then @@ -70,21 +73,33 @@ if ! [ -f $PGDATA/archive_active.trigger ] ; then touch $PGDATA/archive_active.trigger fi -if [[ "$IS_FULL" = "" || $BACKUP_MODE = "full" ]] ; then +if [[ "$IS_FULL" = "" || $BACKUPMODE = "full" ]] ; then echo "The initial backup must be type FULL ..." 
- /usr/bin/pg_probackup-$PG_MAJOR backup -d postgres --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + /usr/bin/pg_probackup-$PG_MAJOR backup -d postgres --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS else - # Backup type depends on day or input parameter - /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUP_MODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + if [[ $BACKUPMODE = "merge" ]]; then + # в этом режиме здесь всегда PAGE + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b page $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS + else + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUPMODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS + fi STATUS=`/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --format=json | jq -c '.[].backups[0].status'` LAST_STATE=${STATUS//'"'/''} if [[ "$LAST_STATE" = "CORRUPT" || "$LAST_STATE" = "ERROR" || "$LAST_STATE" = "ORPHAN" ]] ; then # You need to run a full backup, as an error occurred with incremental # Perhaps the loss of the segment at Failover ... 
- /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS fi fi +if [[ $BACKUPMODE = "merge" ]] ; then + # объединяем старые бэкапы в соответствии с настройками + /usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --merge-expired --no-validate --threads=$BACKUP_THREADS +else + # чистим старые бэкапы в соответствии с настройками + /usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --threads=$BACKUP_THREADS +fi + # collecting statistics on backups /usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt /usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --archive >> ~postgres/backups.txt diff --git a/13/docker-postgres/Dockerfile b/13/docker-postgres/Dockerfile index e785769..c70eda4 100644 --- a/13/docker-postgres/Dockerfile +++ b/13/docker-postgres/Dockerfile @@ -4,7 +4,7 @@ # https://hub.docker.com/r/postgis/postgis # https://github.com/postgis/docker-postgis # -FROM postgres:13.13 +FROM postgres:13.14 LABEL maintainer="Sergey Grinko " @@ -16,7 +16,7 @@ ENV BACKUP_PATH /mnt/pgbak ENV POSTGRES_INITDB_ARGS "--locale=ru_RU.UTF8 --data-checksums" ENV RUM_VERSION 1.3.13 -RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 \ +RUN echo ru_RU.UTF-8 UTF-8 >> /etc/locale.gen; locale-gen \ && apt-get update \ && apt-get install -y --no-install-recommends ca-certificates jq wget freetds-dev freetds-common git make gcc postgresql-server-dev-$PG_MAJOR libicu-dev sendemail htop mc systemtap-sdt-dev vim \ # подключаем репозитарий архивной утилиты @@ -64,7 +64,7 @@ RUN localedef -i ru_RU -c -f UTF-8 -A 
/usr/share/locale/locale.alias ru_RU.UTF-8 && make USE_PGXS=1 install \ # ====== pg_variables && cd /tmp/build_ext \ - && git clone https://github.com/postgrespro/pg_variables \ + && git clone https://github.com/xinferum/pg_variables \ && cd pg_variables \ && make USE_PGXS=1 \ && make USE_PGXS=1 install \ @@ -102,7 +102,7 @@ RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 && cd / \ && ln -s /usr/share/postgresql/$PG_MAJOR/tsearch_data /usr/share/postgresql/ \ # ====== clean all unused package... - && apt-get purge -y git* mariadb* make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ && apt-get -f install \ && apt-get -y autoremove \ && apt-get -y clean \ diff --git a/13/docker-postgres/backup.sh b/13/docker-postgres/backup.sh index 5d4d510..f82006f 100644 --- a/13/docker-postgres/backup.sh +++ b/13/docker-postgres/backup.sh @@ -30,10 +30,6 @@ if [ "$BACKUP_THREADS" = "" ]; then BACKUP_THREADS=4 fi -if [ "$BACKUP_MODE" = "" ]; then - BACKUP_MODE=page -fi - if [ "$BACKUP_STREAM" = "" ]; then BACKUP_STREAM="stream" fi @@ -51,15 +47,14 @@ fi if [ "$DOW" = "6" ] ; then # make a full backup once a week (Saturday) - BACKUP_MODE=full + BACKUPMODE=full else # make an incremental backup on other days of the week - BACKUP_MODE=page + BACKUPMODE=page fi - -if [ "$1" != "" ]; then +if [ "$BACKUP_MODE" != "" ]; then # The backup creation mode is given forcibly - BACKUP_MODE=$1 + BACKUPMODE=$BACKUP_MODE fi BACKUP_STREAM="--stream" @@ -75,7 +70,6 @@ if [ "$3" != "" ]; then BACKUP_THREADS=$3 fi - cd $BACKUP_PATH COUNT_DIR=`ls -l $BACKUP_PATH | grep "^d" | wc -l` @@ -98,21 +92,33 @@ if ! 
[ -f $PGDATA/archive_active.trigger ] ; then su - postgres -c "touch $PGDATA/archive_active.trigger" fi -if [[ "$IS_FULL" = "" || $BACKUP_MODE = "full" ]] ; then +if [[ "$IS_FULL" = "" || $BACKUPMODE = "full" ]] ; then # Full backup needs to be forcibly - su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" else # Backup type depends on day or input parameter - su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUP_MODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + if [[ $BACKUPMODE = "merge" ]]; then + # в этом режиме здесь всегда PAGE + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b page $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" + else + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUPMODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" + fi STATUS=`su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --format=json | jq -c '.[].backups[0].status'"` LAST_STATE=${STATUS//'"'/''} if [[ "$LAST_STATE" = "CORRUPT" || "$LAST_STATE" = "ERROR" || "$LAST_STATE" = "ORPHAN" ]] ; then # You need to run a full backup, as an error occurred with incremental # Perhaps the loss of the segment at Failover ...
- su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" fi fi +if [[ $BACKUPMODE = "merge" ]] ; then + # объединяем старые бэкапы в соответствии с настройками + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --merge-expired --no-validate --threads=$BACKUP_THREADS" +else + # чистим старые бэкапы в соответствии с настройками + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --threads=$BACKUP_THREADS" +fi # collecting statistics on backups su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt" diff --git a/13/docker-postgres/sql/user_lookup.sql b/13/docker-postgres/sql/user_lookup.sql index 80e1f47..43e1a1f 100644 --- a/13/docker-postgres/sql/user_lookup.sql +++ b/13/docker-postgres/sql/user_lookup.sql @@ -1,9 +1,9 @@ -CREATE OR REPLACE FUNCTION pgbouncer.user_lookup(p_username text, OUT uname text, OUT phash text) RETURNS record - LANGUAGE plpgsql SECURITY DEFINER - AS $$ -BEGIN - SELECT usename, passwd FROM pg_catalog.pg_shadow - WHERE usename = p_username INTO uname, phash; - RETURN; -END; -$$; +CREATE OR REPLACE FUNCTION pgbouncer.user_lookup(p_username text, OUT uname text, OUT phash text) RETURNS record + LANGUAGE plpgsql SECURITY DEFINER + AS $$ +BEGIN + SELECT usename, passwd FROM pg_catalog.pg_shadow + WHERE usename = p_username INTO uname, phash; + RETURN; +END; +$$; diff --git a/13/docker_start.sh b/13/docker_start.sh index 5bac5b1..676c086 100644 --- a/13/docker_start.sh +++ b/13/docker_start.sh @@ -9,5 +9,5 @@ docker run --rm --name 
my_postgres_13 --shm-size 2147483648 -p 5433:5432/tcp --s -v /mnt/pgbak2/:/mnt/pgbak \ -v /usr/share/postgres/13_1/tsearch_data:/usr/share/postgresql/tsearch_data \ -e POSTGRES_PASSWORD=postgres -e POSTGRES_HOST_AUTH_METHOD=trust -e DEPLOY_PASSWORD=postgres -e PGBOUNCER_PASSWORD=postgres -e TZ="Etc/UTC" \ - grufos/postgres:13.13 \ + grufos/postgres:13.14 \ -c shared_preload_libraries="plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm" -c shared_ispell.max_size=70MB diff --git a/13/postgres-service.yml b/13/postgres-service.yml index eb0bc24..6a3fa73 100644 --- a/13/postgres-service.yml +++ b/13/postgres-service.yml @@ -3,7 +3,7 @@ services: postgres: -# image: grufos/postgres:13.13 +# image: grufos/postgres:13.14 build: context: ./docker-postgres dockerfile: Dockerfile @@ -35,5 +35,5 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" diff --git a/13/postgres-service_all.yml b/13/postgres-service_all.yml index 95a85a3..a17522c 100644 --- a/13/postgres-service_all.yml +++ b/13/postgres-service_all.yml @@ -3,7 +3,7 @@ services: postgres: -# image: grufos/postgres:13.13 +# image: grufos/postgres:13.14 build: context: ./docker-postgres dockerfile: Dockerfile @@ -35,10 +35,10 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" pgbouncer: -# image: grufos/pgbouncer:1.17.0 +# image: grufos/pgbouncer:1.22.0 build: context: ./docker-pgbouncer dockerfile: Dockerfile diff --git a/13/postgres-service_pgb.yml b/13/postgres-service_pgb.yml index 3fcf900..076b68d 100644 --- a/13/postgres-service_pgb.yml +++ b/13/postgres-service_pgb.yml @@ -35,10 +35,10 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" pgbouncer: -# image: grufos/pgbouncer:1.17.0 +# 
image: grufos/pgbouncer:1.22.0 build: context: ./docker-pgbouncer dockerfile: Dockerfile diff --git a/14/backup-service.yml b/14/backup-service.yml index 627c175..52ed67e 100644 --- a/14/backup-service.yml +++ b/14/backup-service.yml @@ -20,5 +20,5 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" BACKUP_STREAM: "yes" diff --git a/14/bin/clear_all_docker.sh b/14/bin/clear_all_docker.sh index ac2cdc5..9d06db3 100644 --- a/14/bin/clear_all_docker.sh +++ b/14/bin/clear_all_docker.sh @@ -1,4 +1,5 @@ #!/bin/bash +# docker system prune -a # чистка всех образов в каталоге /var/lib/docker/overlay2 docker stop $(docker ps -q) docker rm -v $(docker ps -aq -f status=exited) docker rmi $(docker image ls -q) -f diff --git a/14/bin/docker_start.sh b/14/bin/docker_start.sh index fe483e4..214d5d1 100644 --- a/14/bin/docker_start.sh +++ b/14/bin/docker_start.sh @@ -4,6 +4,6 @@ docker run -p 127.0.0.1:5433:5432/tcp --shm-size 2147483648 \ -e POSTGRES_HOST_AUTH_METHOD=trust \ -e DEPLOY_PASSWORD=postgres \ -e TZ="Etc/UTC" \ - grufos/postgres:14.10 \ + grufos/postgres:14.11 \ -c shared_preload_libraries="plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm" \ -c shared_ispell.max_size=70MB diff --git a/14/bin/harbor_push.sh b/14/bin/harbor_push.sh index 05ba0bc..dd738b4 100644 --- a/14/bin/harbor_push.sh +++ b/14/bin/harbor_push.sh @@ -1,7 +1,7 @@ #!/bin/bash VERSION=14 MINOR=10 -VERS_BOUNCER="1.21.0" +VERS_BOUNCER="1.22.0" VERS_PROBACKUP="2.5.13" VERS_MAMONSU="3.5.5" PROJECT=dba_postgres diff --git a/14/bin/hub_push.sh b/14/bin/hub_push.sh index 9ef5712..f255d72 100644 --- a/14/bin/hub_push.sh +++ b/14/bin/hub_push.sh @@ -1,7 +1,7 @@ #!/bin/bash VERSION=14 MINOR=10 -VERS_BOUNCER="1.21.0" +VERS_BOUNCER="1.22.0" VERS_PROBACKUP="2.5.13" VERS_MAMONSU="3.5.5" ACCOUNT=grufos diff --git a/14/docker-mamonsu/bootstrap_post.sql 
b/14/docker-mamonsu/bootstrap_post.sql index e868c1f..07558c2 100644 --- a/14/docker-mamonsu/bootstrap_post.sql +++ b/14/docker-mamonsu/bootstrap_post.sql @@ -1 +1 @@ -select 'GRANT EXECUTE ON FUNCTION mamonsu.' || proname || '() TO mamonsu;' from pg_proc where pronamespace = 'mamonsu'::regnamespace \gexec +select 'GRANT EXECUTE ON FUNCTION mamonsu.' || oid::regprocedure || ' TO mamonsu;' from pg_proc where pronamespace = 'mamonsu'::regnamespace \gexec diff --git a/14/docker-mamonsu/metrics.ru.md b/14/docker-mamonsu/metrics.ru.md index 7058720..6dd04b9 100644 --- a/14/docker-mamonsu/metrics.ru.md +++ b/14/docker-mamonsu/metrics.ru.md @@ -1,82 +1,82 @@ -# Описания плагинов - -## pg_probackup.py -Предназначен для контроля за состоянием каталогов бэкапов создаваемых утилитой [pg_probackup](https://postgrespro.ru/docs/postgrespro/current/app-pgprobackup). -Плагин адаптирован для контроля нескольких инстансов в одном каталоге. Имя инстанса указывается в ключе метрики как подкаталог. - -### Настройки в секции [pgprobackup] - -| Наименование | Ключ | Описание | -| --------------------------------- | ------------------------- | ------------------------------------------------------------------ | -| enabled | False | По умолчанию плагин отключен. Укажите True для включения | -| interval | 900 | Как часто опрашивать состояние каталогов. Указано в секундах | -| backup_dirs | /backup_dir1,/backup_dir2 | Список каталогов бэкапов утилиты pg_probackup | -| pg_probackup_path | /usr/bin/pg_probackup-13 | Полный путь к утилите создания бэкапов pg_probackup | -| max_time_run_backup2alert_in_sec | 21600 | Время срабатывания алерта "Backup runs too long on..." в секундах. | -| max_time_lack_backup2alert_in_sec | 100800 | Время срабатывания алерта "Long time no backups on..." в секундах. 
| - - -### Текущие метрики в Discovery правиле: - -| Наименование | Ключ | Хранить | Описание | -| ---------------------------------------------------------- | ------------------------------------------------ | ------- | -------------------------------------------------------- | -| Pg_probackup dir {#BACKUPDIR}: size | pg_probackup.dir.size[{#BACKUPDIR}] | 31d | Общий размер каталога: /backups + /wal | -| Pg_probackup dir {#BACKUPDIR}/backups: size | pg_probackup.dir.size[{#BACKUPDIR}/backups] | 31d | Размер подкаталога /backups | -| Pg_probackup dir {#BACKUPDIR}/wal: size | pg_probackup.dir.size[{#BACKUPDIR}/wal] | 31d | Размер подкаталога /wal | -| Pg_probackup dir {#BACKUPDIR}: duration full backup | pg_probackup.dir.duration_full[{#BACKUPDIR}] | 31d | Длительность в секундах создания полного бэкапа | -| Pg_probackup dir {#BACKUPDIR}: duration incremental backup | pg_probackup.dir.duration_inc[{#BACKUPDIR}] | 31d | Длительность в секундах создания инкрементального бэкапа | -| Pg_probackup dir {#BACKUPDIR}: start time backup | pg_probackup.dir.start_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) старта создания бэкапа | -| Pg_probackup dir {#BACKUPDIR}: end time backup | pg_probackup.dir.end_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) завершения создания бэкапа | -| Pg_probackup dir {#BACKUPDIR}: mode | pg_probackup.dir.mode_backup[{#BACKUPDIR}] | | Текущий режим бэкапа | -| Pg_probackup dir {#BACKUPDIR}: status | pg_probackup.dir.status_backup[{#BACKUPDIR}] | | Текущий статус бэкапа | -| Pg_probackup dir {#BACKUPDIR}: error | pg_probackup.dir.error[{#BACKUPDIR}] | | Признак ошибочного состояния или "ok" если всё хорошо | - - -### Текущие алерты в Discovery правиле: -Созданы следующие алерты, позволящие контролировать состояние архивных каталогов: - -* Алерт срабатывает если создание бэкапа выполняется больше, чем указано в настроечном параметре `max_time_run_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 21600 (6 часов). 
Контролируется текущее состояние в котором находится процесс создания бэкапной копии. - -| Категория | Детали | -| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Важность: | Warning | -| Наименование: | Backup runs too long on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} (RUNNING) | -| Выражение: | {PostgresPro-Linux:pg_probackup.dir.status_backup[{#BACKUPDIR}].last()}="RUNNING" and ( {PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].now()}-{PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].last()}) > max_time_run_backup2alert_in_sec | - -* Алерт срабатывает если не выполняется создание нового бэкапа дольше, чем указано в настроечном параметре `max_time_lack_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 100800 (28 часов). Контролируется, что очередной бэкап (тип бэкапа любой) будет создан не позже, чем указано в параметре. - -| Категория | Детали | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Важность: | Warning | -| Наименование: | Long time no backups on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} | -| Выражение: | ( {PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].now()} -{PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].last()}) > max_time_lack_backup2alert_in_sec | - -* Алерт срабатывает если при создании бэкапа произошла ошибка - 'ERROR', 'CORRUPT', 'ORPHAN'. Контролирует состояние любой архивной копии, не только последней. Активен всё время пока есть любая архивная копия с ошибочным состоянием. 
- -| Категория | Детали | -| ------------- | ----------------------------------------------------------------------------------- | -| Важность: | Average | -| Наименование: | Error in pg_probackup dir {#BACKUPDIR} (hostname={HOSTNAME} value={ITEM.LASTVALUE}) | -| Выражение: | {PostgresPro-Linux:pg_probackup.dir.error[{#BACKUPDIR}].str(ok)}<>1 | - - -### Текущие графики в Discovery правиле: - -1. Pg_probackup: backup dir: {#BACKUPDIR} size - -Показывает 3 метрики с информацией о размерах каталогов с архивными копиями: - -| Метрика | Сторона графика | Описание | -| ------------------------------------------- | --------------- | -------------------------------------- | -| pg_probackup.dir.size[{#BACKUPDIR}] | (Left Side) | Общий размер каталогов /backups + /wal | -| pg_probackup.dir.size[{#BACKUPDIR}/backups] | (Left Side) | размер подкаталога /backups | -| pg_probackup.dir.size[{#BACKUPDIR}/wal] | (Right Side) | размер подкаталога /wal | - -2. Pg_probackup: backup dir: {#BACKUPDIR} duration - -Показывает 2 метрики с длительностью создания архивных копий: - -| Метрика | Сторона графика | Описание | -| -------------------------------------------- | --------------- | -------------------------------------------------------- | -| pg_probackup.dir.duration_full[{#BACKUPDIR}] | (Left Side) | Длительность в секундах создания полного бэкапа | -| pg_probackup.dir.duration_inc[{#BACKUPDIR}] | (Right Side) | Длительность в секундах создания инкрементального бэкапа | +# Описания плагинов + +## pg_probackup.py +Предназначен для контроля за состоянием каталогов бэкапов создаваемых утилитой [pg_probackup](https://postgrespro.ru/docs/postgrespro/current/app-pgprobackup). +Плагин адаптирован для контроля нескольких инстансов в одном каталоге. Имя инстанса указывается в ключе метрики как подкаталог. 
+ +### Настройки в секции [pgprobackup] + +| Наименование | Ключ | Описание | +| --------------------------------- | ------------------------- | ------------------------------------------------------------------ | +| enabled | False | По умолчанию плагин отключен. Укажите True для включения | +| interval | 900 | Как часто опрашивать состояние каталогов. Указано в секундах | +| backup_dirs | /backup_dir1,/backup_dir2 | Список каталогов бэкапов утилиты pg_probackup | +| pg_probackup_path | /usr/bin/pg_probackup-13 | Полный путь к утилите создания бэкапов pg_probackup | +| max_time_run_backup2alert_in_sec | 21600 | Время срабатывания алерта "Backup runs too long on..." в секундах. | +| max_time_lack_backup2alert_in_sec | 100800 | Время срабатывания алерта "Long time no backups on..." в секундах. | + + +### Текущие метрики в Discovery правиле: + +| Наименование | Ключ | Хранить | Описание | +| ---------------------------------------------------------- | ------------------------------------------------ | ------- | -------------------------------------------------------- | +| Pg_probackup dir {#BACKUPDIR}: size | pg_probackup.dir.size[{#BACKUPDIR}] | 31d | Общий размер каталога: /backups + /wal | +| Pg_probackup dir {#BACKUPDIR}/backups: size | pg_probackup.dir.size[{#BACKUPDIR}/backups] | 31d | Размер подкаталога /backups | +| Pg_probackup dir {#BACKUPDIR}/wal: size | pg_probackup.dir.size[{#BACKUPDIR}/wal] | 31d | Размер подкаталога /wal | +| Pg_probackup dir {#BACKUPDIR}: duration full backup | pg_probackup.dir.duration_full[{#BACKUPDIR}] | 31d | Длительность в секундах создания полного бэкапа | +| Pg_probackup dir {#BACKUPDIR}: duration incremental backup | pg_probackup.dir.duration_inc[{#BACKUPDIR}] | 31d | Длительность в секундах создания инкрементального бэкапа | +| Pg_probackup dir {#BACKUPDIR}: start time backup | pg_probackup.dir.start_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) старта создания бэкапа | +| Pg_probackup dir {#BACKUPDIR}: end time backup | 
pg_probackup.dir.end_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) завершения создания бэкапа | +| Pg_probackup dir {#BACKUPDIR}: mode | pg_probackup.dir.mode_backup[{#BACKUPDIR}] | | Текущий режим бэкапа | +| Pg_probackup dir {#BACKUPDIR}: status | pg_probackup.dir.status_backup[{#BACKUPDIR}] | | Текущий статус бэкапа | +| Pg_probackup dir {#BACKUPDIR}: error | pg_probackup.dir.error[{#BACKUPDIR}] | | Признак ошибочного состояния или "ok" если всё хорошо | + + +### Текущие алерты в Discovery правиле: +Созданы следующие алерты, позволящие контролировать состояние архивных каталогов: + +* Алерт срабатывает если создание бэкапа выполняется больше, чем указано в настроечном параметре `max_time_run_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 21600 (6 часов). Контролируется текущее состояние в котором находится процесс создания бэкапной копии. + +| Категория | Детали | +| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Важность: | Warning | +| Наименование: | Backup runs too long on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} (RUNNING) | +| Выражение: | {PostgresPro-Linux:pg_probackup.dir.status_backup[{#BACKUPDIR}].last()}="RUNNING" and ( {PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].now()}-{PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].last()}) > max_time_run_backup2alert_in_sec | + +* Алерт срабатывает если не выполняется создание нового бэкапа дольше, чем указано в настроечном параметре `max_time_lack_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 100800 (28 часов). Контролируется, что очередной бэкап (тип бэкапа любой) будет создан не позже, чем указано в параметре. 
+ +| Категория | Детали | +| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Важность: | Warning | +| Наименование: | Long time no backups on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} | +| Выражение: | ( {PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].now()} -{PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].last()}) > max_time_lack_backup2alert_in_sec | + +* Алерт срабатывает если при создании бэкапа произошла ошибка - 'ERROR', 'CORRUPT', 'ORPHAN'. Контролирует состояние любой архивной копии, не только последней. Активен всё время пока есть любая архивная копия с ошибочным состоянием. + +| Категория | Детали | +| ------------- | ----------------------------------------------------------------------------------- | +| Важность: | Average | +| Наименование: | Error in pg_probackup dir {#BACKUPDIR} (hostname={HOSTNAME} value={ITEM.LASTVALUE}) | +| Выражение: | {PostgresPro-Linux:pg_probackup.dir.error[{#BACKUPDIR}].str(ok)}<>1 | + + +### Текущие графики в Discovery правиле: + +1. Pg_probackup: backup dir: {#BACKUPDIR} size + +Показывает 3 метрики с информацией о размерах каталогов с архивными копиями: + +| Метрика | Сторона графика | Описание | +| ------------------------------------------- | --------------- | -------------------------------------- | +| pg_probackup.dir.size[{#BACKUPDIR}] | (Left Side) | Общий размер каталогов /backups + /wal | +| pg_probackup.dir.size[{#BACKUPDIR}/backups] | (Left Side) | размер подкаталога /backups | +| pg_probackup.dir.size[{#BACKUPDIR}/wal] | (Right Side) | размер подкаталога /wal | + +2. 
Pg_probackup: backup dir: {#BACKUPDIR} duration + +Показывает 2 метрики с длительностью создания архивных копий: + +| Метрика | Сторона графика | Описание | +| -------------------------------------------- | --------------- | -------------------------------------------------------- | +| pg_probackup.dir.duration_full[{#BACKUPDIR}] | (Left Side) | Длительность в секундах создания полного бэкапа | +| pg_probackup.dir.duration_inc[{#BACKUPDIR}] | (Right Side) | Длительность в секундах создания инкрементального бэкапа | diff --git a/14/docker-pgprobackup/backup.sh b/14/docker-pgprobackup/backup.sh index 6fbe28d..5f7cb6a 100644 --- a/14/docker-pgprobackup/backup.sh +++ b/14/docker-pgprobackup/backup.sh @@ -25,13 +25,16 @@ if [ "$BACKUP_THREADS" = "" ]; then BACKUP_THREADS=4 fi -if [ "$BACKUP_MODE" = "" ]; then - BACKUP_MODE=page -fi - if [ "$DOW" = "6" ] ; then # make a full backup once a week (Saturday) - BACKUP_MODE=full + BACKUPMODE=full +else + # make an incremental backup on other days of the week + BACKUPMODE=page +fi +if [ "$BACKUP_MODE" != "" ]; then + # The backup creation mode is given forcibly + BACKUPMODE=$BACKUP_MODE fi if [ "$BACKUP_STREAM" = "" ]; then @@ -70,21 +73,33 @@ if ! [ -f $PGDATA/archive_active.trigger ] ; then touch $PGDATA/archive_active.trigger fi -if [[ "$IS_FULL" = "" || $BACKUP_MODE = "full" ]] ; then +if [[ "$IS_FULL" = "" || $BACKUPMODE = "full" ]] ; then echo "The initial backup must be type FULL ..." 
- /usr/bin/pg_probackup-$PG_MAJOR backup -d postgres --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + /usr/bin/pg_probackup-$PG_MAJOR backup -d postgres --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS else - # Backup type depends on day or input parameter - /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUP_MODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + if [[ $BACKUPMODE = "merge" ]]; then + # в этом режиме здесь всегда PAGE + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b page $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS + else + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUPMODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS + fi STATUS=`/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --format=json | jq -c '.[].backups[0].status'` LAST_STATE=${STATUS//'"'/''} if [[ "$LAST_STATE" = "CORRUPT" || "$LAST_STATE" = "ERROR" || "$LAST_STATE" = "ORPHAN" ]] ; then # You need to run a full backup, as an error occurred with incremental # Perhaps the loss of the segment at Failover ... 
- /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS fi fi +if [[ $BACKUPMODE = "merge" ]] ; then + # объединяем старые бэкапы в соответствии с настройками + /usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --merge-expired --no-validate --threads=$BACKUP_THREADS +else + # чистим старые бэкапы в соответствии с настройками + /usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --threads=$BACKUP_THREADS +fi + # collecting statistics on backups /usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt /usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --archive >> ~postgres/backups.txt diff --git a/14/docker-postgres/Dockerfile b/14/docker-postgres/Dockerfile index 9b0e35c..6ea6263 100644 --- a/14/docker-postgres/Dockerfile +++ b/14/docker-postgres/Dockerfile @@ -4,7 +4,7 @@ # https://hub.docker.com/r/postgis/postgis # https://github.com/postgis/docker-postgis # -FROM postgres:14.10 +FROM postgres:14.11 LABEL maintainer="Sergey Grinko " @@ -16,7 +16,7 @@ ENV BACKUP_PATH /mnt/pgbak ENV POSTGRES_INITDB_ARGS "--locale=ru_RU.UTF8 --data-checksums" ENV RUM_VERSION 1.3.13 -RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 \ +RUN echo ru_RU.UTF-8 UTF-8 >> /etc/locale.gen; locale-gen \ && apt-get update \ && apt-get install -y --no-install-recommends ca-certificates jq wget freetds-dev freetds-common git make gcc postgresql-server-dev-$PG_MAJOR libicu-dev sendemail htop mc systemtap-sdt-dev vim \ # подключаем репозитарий архивной утилиты @@ -64,7 +64,7 @@ RUN localedef -i ru_RU -c -f UTF-8 -A 
/usr/share/locale/locale.alias ru_RU.UTF-8 && make USE_PGXS=1 install \ # ====== pg_variables && cd /tmp/build_ext \ - && git clone https://github.com/postgrespro/pg_variables \ + && git clone https://github.com/xinferum/pg_variables \ && cd pg_variables \ && make USE_PGXS=1 \ && make USE_PGXS=1 install \ @@ -102,7 +102,7 @@ RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 && cd / \ && ln -s /usr/share/postgresql/$PG_MAJOR/tsearch_data /usr/share/postgresql/ \ # ====== clean all unused package... - && apt-get purge -y git* mariadb* make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ && apt-get -f install \ && apt-get -y autoremove \ && apt-get -y clean \ diff --git a/14/docker-postgres/backup.sh b/14/docker-postgres/backup.sh index 5d4d510..f82006f 100644 --- a/14/docker-postgres/backup.sh +++ b/14/docker-postgres/backup.sh @@ -30,10 +30,6 @@ if [ "$BACKUP_THREADS" = "" ]; then BACKUP_THREADS=4 fi -if [ "$BACKUP_MODE" = "" ]; then - BACKUP_MODE=page -fi - if [ "$BACKUP_STREAM" = "" ]; then BACKUP_STREAM="stream" fi @@ -51,15 +47,14 @@ fi if [ "$DOW" = "6" ] ; then # make a full backup once a week (Saturday) - BACKUP_MODE=full + BACKUPMODE=full else # make an incremental backup on other days of the week - BACKUP_MODE=page + BACKUPMODE=page fi - -if [ "$1" != "" ]; then +if [ "$BACKUP_MODE" != "" ]; then # The backup creation mode is given forcibly - BACKUP_MODE=$1 + BACKUPMODE=$BACKUP_MODE fi BACKUP_STREAM="--stream" @@ -75,7 +70,6 @@ if [ "$3" != "" ]; then BACKUP_THREADS=$3 fi - cd $BACKUP_PATH COUNT_DIR=`ls -l $BACKUP_PATH | grep "^d" | wc -l` @@ -98,21 +92,33 @@ if ! 
[ -f $PGDATA/archive_active.trigger ] ; then su - postgres -c "touch $PGDATA/archive_active.trigger" fi -if [[ "$IS_FULL" = "" || $BACKUP_MODE = "full" ]] ; then +if [[ "$IS_FULL" = "" || $BACKUPMODE = "full" ]] ; then # Full backup needs to be forcibly - su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" else # Backup type depends on day or input parameter - su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUP_MODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + if [[ $BACKUPMODE = "merge" ]]; then + # в этом режиме здесь всегда PAGE + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b page $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" + else + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUPMODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" + fi STATUS=`su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --format=json | jq -c '.[].backups[0].status'"` LAST_STATE=${STATUS//'"'/''} if [[ "$LAST_STATE" = "CORRUPT" || "$LAST_STATE" = "ERROR" || "$LAST_STATE" = "ORPHAN" ]] ; then # You need to run a full backup, as an error occurred with incremental # Perhaps the loss of the segment at Failover ...
- su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" fi fi +if [[ $BACKUPMODE = "merge" ]] ; then + # объединяем старые бэкапы в соответствии с настройками + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --merge-expired --no-validate --threads=$BACKUP_THREADS" +else + # чистим старые бэкапы в соответствии с настройками + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --threads=$BACKUP_THREADS" +fi # collecting statistics on backups su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt" diff --git a/14/docker-postgres/sql/user_lookup.sql b/14/docker-postgres/sql/user_lookup.sql index 80e1f47..43e1a1f 100644 --- a/14/docker-postgres/sql/user_lookup.sql +++ b/14/docker-postgres/sql/user_lookup.sql @@ -1,9 +1,9 @@ -CREATE OR REPLACE FUNCTION pgbouncer.user_lookup(p_username text, OUT uname text, OUT phash text) RETURNS record - LANGUAGE plpgsql SECURITY DEFINER - AS $$ -BEGIN - SELECT usename, passwd FROM pg_catalog.pg_shadow - WHERE usename = p_username INTO uname, phash; - RETURN; -END; -$$; +CREATE OR REPLACE FUNCTION pgbouncer.user_lookup(p_username text, OUT uname text, OUT phash text) RETURNS record + LANGUAGE plpgsql SECURITY DEFINER + AS $$ +BEGIN + SELECT usename, passwd FROM pg_catalog.pg_shadow + WHERE usename = p_username INTO uname, phash; + RETURN; +END; +$$; diff --git a/14/docker_start.sh b/14/docker_start.sh index e40ffc7..fc4c83f 100644 --- a/14/docker_start.sh +++ b/14/docker_start.sh @@ -9,5 +9,5 @@ docker run --rm --name 
my_postgres_14 --shm-size 2147483648 -p 5433:5432/tcp --s -v /mnt/pgbak2/:/mnt/pgbak \ -v /usr/share/postgres/14_1/tsearch_data:/usr/share/postgresql/tsearch_data \ -e POSTGRES_PASSWORD=postgres -e POSTGRES_HOST_AUTH_METHOD=trust -e DEPLOY_PASSWORD=postgres -e PGBOUNCER_PASSWORD=postgres -e TZ="Etc/UTC" \ - grufos/postgres:14.10 \ + grufos/postgres:14.11 \ -c shared_preload_libraries="plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm" -c shared_ispell.max_size=70MB diff --git a/14/postgres-service.yml b/14/postgres-service.yml index 2c22cc0..c723b79 100644 --- a/14/postgres-service.yml +++ b/14/postgres-service.yml @@ -3,7 +3,7 @@ services: postgres: -# image: grufos/postgres:14.10 +# image: grufos/postgres:14.11 build: context: ./docker-postgres dockerfile: Dockerfile @@ -35,5 +35,5 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" diff --git a/14/postgres-service_all.yml b/14/postgres-service_all.yml index 378e5f3..878e084 100644 --- a/14/postgres-service_all.yml +++ b/14/postgres-service_all.yml @@ -3,7 +3,7 @@ services: postgres: -# image: grufos/postgres:14.10 +# image: grufos/postgres:14.11 build: context: ./docker-postgres dockerfile: Dockerfile @@ -35,10 +35,10 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" pgbouncer: -# image: grufos/pgbouncer:1.17.0 +# image: grufos/pgbouncer:1.22.0 build: context: ./docker-pgbouncer dockerfile: Dockerfile diff --git a/14/postgres-service_pgb.yml b/14/postgres-service_pgb.yml index 191de09..3bb270e 100644 --- a/14/postgres-service_pgb.yml +++ b/14/postgres-service_pgb.yml @@ -3,7 +3,7 @@ services: postgres: -# image: grufos/postgres:14.10 +# image: grufos/postgres:14.11 build: context: ./docker-postgres dockerfile: Dockerfile @@ -35,10 +35,10 @@ services: EMAIL_SERVER: 
"mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" pgbouncer: -# image: grufos/pgbouncer:1.17.0 +# image: grufos/pgbouncer:1.22.0 build: context: ./docker-pgbouncer dockerfile: Dockerfile diff --git a/15/backup-service.yml b/15/backup-service.yml index 356f2c3..3e15d81 100644 --- a/15/backup-service.yml +++ b/15/backup-service.yml @@ -20,5 +20,5 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" BACKUP_STREAM: "yes" diff --git a/15/bin/clear_all_docker.sh b/15/bin/clear_all_docker.sh index ac2cdc5..9d06db3 100644 --- a/15/bin/clear_all_docker.sh +++ b/15/bin/clear_all_docker.sh @@ -1,4 +1,5 @@ #!/bin/bash +# docker system prune -a # чистка всех образов в каталоге /var/lib/docker/overlay2 docker stop $(docker ps -q) docker rm -v $(docker ps -aq -f status=exited) docker rmi $(docker image ls -q) -f diff --git a/15/bin/docker_start.sh b/15/bin/docker_start.sh index 192eae3..77adca3 100644 --- a/15/bin/docker_start.sh +++ b/15/bin/docker_start.sh @@ -4,6 +4,6 @@ docker run -p 127.0.0.1:5433:5432/tcp --shm-size 2147483648 \ -e POSTGRES_HOST_AUTH_METHOD=trust \ -e DEPLOY_PASSWORD=postgres \ -e TZ="Etc/UTC" \ - grufos/postgres:15.5 \ + grufos/postgres:15.6 \ -c shared_preload_libraries="plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm" \ -c shared_ispell.max_size=70MB diff --git a/15/bin/harbor_push.sh b/15/bin/harbor_push.sh index 6e1913e..f032f75 100644 --- a/15/bin/harbor_push.sh +++ b/15/bin/harbor_push.sh @@ -1,7 +1,7 @@ #!/bin/bash VERSION=15 MINOR=5 -VERS_BOUNCER="1.21.0" +VERS_BOUNCER="1.22.0" VERS_PROBACKUP="2.5.13" VERS_MAMONSU="3.5.5" PROJECT=dba_postgres diff --git a/15/bin/hub_push.sh b/15/bin/hub_push.sh index 144c683..7b704b4 100644 --- a/15/bin/hub_push.sh +++ b/15/bin/hub_push.sh @@ -1,7 +1,7 @@ #!/bin/bash VERSION=15 MINOR=5 
-VERS_BOUNCER="1.21.0" +VERS_BOUNCER="1.22.0" VERS_PROBACKUP="2.5.13" VERS_MAMONSU="3.5.5" ACCOUNT=grufos diff --git a/15/bin/upgrade_start.sh b/15/bin/upgrade_start.sh index c002c8b..5f6482f 100644 --- a/15/bin/upgrade_start.sh +++ b/15/bin/upgrade_start.sh @@ -1,10 +1,14 @@ #!/bin/bash # создаём все необходимые каталоги -mkdir -p /mnt/pgbak2 /var/log/postgresql1 /var/log/pgbouncer1 /var/log/mamonsu1 /var/lib/pgsql/15_1 /var/lib/pgsql/15_1/14 /usr/share/postgres/15_1/tsearch_data -chown 999:999 /var/log/postgresql1 /var/lib/pgsql/15_1 /var/lib/pgsql/15_1/14 /var/log/pgbouncer1 /var/log/mamonsu1 /mnt/pgbak2 /usr/share/postgres/15_1 /usr/share/postgres/15_1/tsearch_data +SRC="14" +DEST="15" +mkdir -p /mnt/pgbak2 /var/log/postgresql1 /var/log/mamonsu1 /var/lib/pgsql/${DEST}_1 /usr/share/postgres/${DEST}_1 +chown 999:999 /mnt/pgbak2 /var/log/postgresql1 /var/log/mamonsu1 /var/lib/pgsql/${DEST}_1 /usr/share/postgres/${DEST}_1 +rm -rf /usr/share/postgres/${DEST}_1/* +rm -rf /var/lib/pgsql/${DEST}_1/* +mkdir -p /var/lib/pgsql/${DEST}_1/${DEST} /var/lib/pgsql/${DEST}_1/$SRC /usr/share/postgres/${DEST}_1/tsearch_data +chown 999:999 /var/lib/pgsql/${DEST}_1/${DEST} /var/lib/pgsql/${DEST}_1/$SRC /usr/share/postgres/${DEST}_1/tsearch_data +cp -rpf /var/lib/pgsql/${SRC}_1/* /var/lib/pgsql/${DEST}_1/$SRC clear # запускаем сборку -rm -rf /usr/share/postgres/15_1/* -rm -rf /var/lib/pgsql/15_1/* -cp -rpf /var/lib/pgsql/14_1 /var/lib/pgsql/15_1/14 docker-compose -f "postgres-pgupgrade.yml" up --build "$@" diff --git a/15/docker-analyze/Dockerfile b/15/docker-analyze/Dockerfile index 00e4d9e..5353e38 100644 --- a/15/docker-analyze/Dockerfile +++ b/15/docker-analyze/Dockerfile @@ -27,7 +27,7 @@ RUN apt-get update \ && mkdir -p /var/log/pgbouncer \ && mkdir -p /var/log/mamonsu \ # ... cleaning ... 
- && apt-get purge -y git* mariadb* make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ && apt-get -f install \ && apt-get -y autoremove \ && apt-get -y clean \ diff --git a/15/docker-mamonsu/bootstrap_post.sql b/15/docker-mamonsu/bootstrap_post.sql index e868c1f..07558c2 100644 --- a/15/docker-mamonsu/bootstrap_post.sql +++ b/15/docker-mamonsu/bootstrap_post.sql @@ -1 +1 @@ -select 'GRANT EXECUTE ON FUNCTION mamonsu.' || proname || '() TO mamonsu;' from pg_proc where pronamespace = 'mamonsu'::regnamespace \gexec +select 'GRANT EXECUTE ON FUNCTION mamonsu.' || oid::regprocedure || ' TO mamonsu;' from pg_proc where pronamespace = 'mamonsu'::regnamespace \gexec diff --git a/15/docker-mamonsu/metrics.ru.md b/15/docker-mamonsu/metrics.ru.md index 7058720..6dd04b9 100644 --- a/15/docker-mamonsu/metrics.ru.md +++ b/15/docker-mamonsu/metrics.ru.md @@ -1,82 +1,82 @@ -# Описания плагинов - -## pg_probackup.py -Предназначен для контроля за состоянием каталогов бэкапов создаваемых утилитой [pg_probackup](https://postgrespro.ru/docs/postgrespro/current/app-pgprobackup). -Плагин адаптирован для контроля нескольких инстансов в одном каталоге. Имя инстанса указывается в ключе метрики как подкаталог. - -### Настройки в секции [pgprobackup] - -| Наименование | Ключ | Описание | -| --------------------------------- | ------------------------- | ------------------------------------------------------------------ | -| enabled | False | По умолчанию плагин отключен. Укажите True для включения | -| interval | 900 | Как часто опрашивать состояние каталогов. 
Указано в секундах | -| backup_dirs | /backup_dir1,/backup_dir2 | Список каталогов бэкапов утилиты pg_probackup | -| pg_probackup_path | /usr/bin/pg_probackup-13 | Полный путь к утилите создания бэкапов pg_probackup | -| max_time_run_backup2alert_in_sec | 21600 | Время срабатывания алерта "Backup runs too long on..." в секундах. | -| max_time_lack_backup2alert_in_sec | 100800 | Время срабатывания алерта "Long time no backups on..." в секундах. | - - -### Текущие метрики в Discovery правиле: - -| Наименование | Ключ | Хранить | Описание | -| ---------------------------------------------------------- | ------------------------------------------------ | ------- | -------------------------------------------------------- | -| Pg_probackup dir {#BACKUPDIR}: size | pg_probackup.dir.size[{#BACKUPDIR}] | 31d | Общий размер каталога: /backups + /wal | -| Pg_probackup dir {#BACKUPDIR}/backups: size | pg_probackup.dir.size[{#BACKUPDIR}/backups] | 31d | Размер подкаталога /backups | -| Pg_probackup dir {#BACKUPDIR}/wal: size | pg_probackup.dir.size[{#BACKUPDIR}/wal] | 31d | Размер подкаталога /wal | -| Pg_probackup dir {#BACKUPDIR}: duration full backup | pg_probackup.dir.duration_full[{#BACKUPDIR}] | 31d | Длительность в секундах создания полного бэкапа | -| Pg_probackup dir {#BACKUPDIR}: duration incremental backup | pg_probackup.dir.duration_inc[{#BACKUPDIR}] | 31d | Длительность в секундах создания инкрементального бэкапа | -| Pg_probackup dir {#BACKUPDIR}: start time backup | pg_probackup.dir.start_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) старта создания бэкапа | -| Pg_probackup dir {#BACKUPDIR}: end time backup | pg_probackup.dir.end_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) завершения создания бэкапа | -| Pg_probackup dir {#BACKUPDIR}: mode | pg_probackup.dir.mode_backup[{#BACKUPDIR}] | | Текущий режим бэкапа | -| Pg_probackup dir {#BACKUPDIR}: status | pg_probackup.dir.status_backup[{#BACKUPDIR}] | | Текущий статус бэкапа | -| Pg_probackup dir {#BACKUPDIR}: 
error | pg_probackup.dir.error[{#BACKUPDIR}] | | Признак ошибочного состояния или "ok" если всё хорошо | - - -### Текущие алерты в Discovery правиле: -Созданы следующие алерты, позволящие контролировать состояние архивных каталогов: - -* Алерт срабатывает если создание бэкапа выполняется больше, чем указано в настроечном параметре `max_time_run_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 21600 (6 часов). Контролируется текущее состояние в котором находится процесс создания бэкапной копии. - -| Категория | Детали | -| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Важность: | Warning | -| Наименование: | Backup runs too long on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} (RUNNING) | -| Выражение: | {PostgresPro-Linux:pg_probackup.dir.status_backup[{#BACKUPDIR}].last()}="RUNNING" and ( {PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].now()}-{PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].last()}) > max_time_run_backup2alert_in_sec | - -* Алерт срабатывает если не выполняется создание нового бэкапа дольше, чем указано в настроечном параметре `max_time_lack_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 100800 (28 часов). Контролируется, что очередной бэкап (тип бэкапа любой) будет создан не позже, чем указано в параметре. 
- -| Категория | Детали | -| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Важность: | Warning | -| Наименование: | Long time no backups on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} | -| Выражение: | ( {PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].now()} -{PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].last()}) > max_time_lack_backup2alert_in_sec | - -* Алерт срабатывает если при создании бэкапа произошла ошибка - 'ERROR', 'CORRUPT', 'ORPHAN'. Контролирует состояние любой архивной копии, не только последней. Активен всё время пока есть любая архивная копия с ошибочным состоянием. - -| Категория | Детали | -| ------------- | ----------------------------------------------------------------------------------- | -| Важность: | Average | -| Наименование: | Error in pg_probackup dir {#BACKUPDIR} (hostname={HOSTNAME} value={ITEM.LASTVALUE}) | -| Выражение: | {PostgresPro-Linux:pg_probackup.dir.error[{#BACKUPDIR}].str(ok)}<>1 | - - -### Текущие графики в Discovery правиле: - -1. Pg_probackup: backup dir: {#BACKUPDIR} size - -Показывает 3 метрики с информацией о размерах каталогов с архивными копиями: - -| Метрика | Сторона графика | Описание | -| ------------------------------------------- | --------------- | -------------------------------------- | -| pg_probackup.dir.size[{#BACKUPDIR}] | (Left Side) | Общий размер каталогов /backups + /wal | -| pg_probackup.dir.size[{#BACKUPDIR}/backups] | (Left Side) | размер подкаталога /backups | -| pg_probackup.dir.size[{#BACKUPDIR}/wal] | (Right Side) | размер подкаталога /wal | - -2. 
Pg_probackup: backup dir: {#BACKUPDIR} duration - -Показывает 2 метрики с длительностью создания архивных копий: - -| Метрика | Сторона графика | Описание | -| -------------------------------------------- | --------------- | -------------------------------------------------------- | -| pg_probackup.dir.duration_full[{#BACKUPDIR}] | (Left Side) | Длительность в секундах создания полного бэкапа | -| pg_probackup.dir.duration_inc[{#BACKUPDIR}] | (Right Side) | Длительность в секундах создания инкрементального бэкапа | +# Описания плагинов + +## pg_probackup.py +Предназначен для контроля за состоянием каталогов бэкапов создаваемых утилитой [pg_probackup](https://postgrespro.ru/docs/postgrespro/current/app-pgprobackup). +Плагин адаптирован для контроля нескольких инстансов в одном каталоге. Имя инстанса указывается в ключе метрики как подкаталог. + +### Настройки в секции [pgprobackup] + +| Наименование | Ключ | Описание | +| --------------------------------- | ------------------------- | ------------------------------------------------------------------ | +| enabled | False | По умолчанию плагин отключен. Укажите True для включения | +| interval | 900 | Как часто опрашивать состояние каталогов. Указано в секундах | +| backup_dirs | /backup_dir1,/backup_dir2 | Список каталогов бэкапов утилиты pg_probackup | +| pg_probackup_path | /usr/bin/pg_probackup-13 | Полный путь к утилите создания бэкапов pg_probackup | +| max_time_run_backup2alert_in_sec | 21600 | Время срабатывания алерта "Backup runs too long on..." в секундах. | +| max_time_lack_backup2alert_in_sec | 100800 | Время срабатывания алерта "Long time no backups on..." в секундах. 
| + + +### Текущие метрики в Discovery правиле: + +| Наименование | Ключ | Хранить | Описание | +| ---------------------------------------------------------- | ------------------------------------------------ | ------- | -------------------------------------------------------- | +| Pg_probackup dir {#BACKUPDIR}: size | pg_probackup.dir.size[{#BACKUPDIR}] | 31d | Общий размер каталога: /backups + /wal | +| Pg_probackup dir {#BACKUPDIR}/backups: size | pg_probackup.dir.size[{#BACKUPDIR}/backups] | 31d | Размер подкаталога /backups | +| Pg_probackup dir {#BACKUPDIR}/wal: size | pg_probackup.dir.size[{#BACKUPDIR}/wal] | 31d | Размер подкаталога /wal | +| Pg_probackup dir {#BACKUPDIR}: duration full backup | pg_probackup.dir.duration_full[{#BACKUPDIR}] | 31d | Длительность в секундах создания полного бэкапа | +| Pg_probackup dir {#BACKUPDIR}: duration incremental backup | pg_probackup.dir.duration_inc[{#BACKUPDIR}] | 31d | Длительность в секундах создания инкрементального бэкапа | +| Pg_probackup dir {#BACKUPDIR}: start time backup | pg_probackup.dir.start_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) старта создания бэкапа | +| Pg_probackup dir {#BACKUPDIR}: end time backup | pg_probackup.dir.end_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) завершения создания бэкапа | +| Pg_probackup dir {#BACKUPDIR}: mode | pg_probackup.dir.mode_backup[{#BACKUPDIR}] | | Текущий режим бэкапа | +| Pg_probackup dir {#BACKUPDIR}: status | pg_probackup.dir.status_backup[{#BACKUPDIR}] | | Текущий статус бэкапа | +| Pg_probackup dir {#BACKUPDIR}: error | pg_probackup.dir.error[{#BACKUPDIR}] | | Признак ошибочного состояния или "ok" если всё хорошо | + + +### Текущие алерты в Discovery правиле: +Созданы следующие алерты, позволящие контролировать состояние архивных каталогов: + +* Алерт срабатывает если создание бэкапа выполняется больше, чем указано в настроечном параметре `max_time_run_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 21600 (6 часов). 
Контролируется текущее состояние в котором находится процесс создания бэкапной копии. + +| Категория | Детали | +| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Важность: | Warning | +| Наименование: | Backup runs too long on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} (RUNNING) | +| Выражение: | {PostgresPro-Linux:pg_probackup.dir.status_backup[{#BACKUPDIR}].last()}="RUNNING" and ( {PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].now()}-{PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].last()}) > max_time_run_backup2alert_in_sec | + +* Алерт срабатывает если не выполняется создание нового бэкапа дольше, чем указано в настроечном параметре `max_time_lack_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 100800 (28 часов). Контролируется, что очередной бэкап (тип бэкапа любой) будет создан не позже, чем указано в параметре. + +| Категория | Детали | +| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Важность: | Warning | +| Наименование: | Long time no backups on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} | +| Выражение: | ( {PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].now()} -{PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].last()}) > max_time_lack_backup2alert_in_sec | + +* Алерт срабатывает если при создании бэкапа произошла ошибка - 'ERROR', 'CORRUPT', 'ORPHAN'. Контролирует состояние любой архивной копии, не только последней. Активен всё время пока есть любая архивная копия с ошибочным состоянием. 
+ +| Категория | Детали | +| ------------- | ----------------------------------------------------------------------------------- | +| Важность: | Average | +| Наименование: | Error in pg_probackup dir {#BACKUPDIR} (hostname={HOSTNAME} value={ITEM.LASTVALUE}) | +| Выражение: | {PostgresPro-Linux:pg_probackup.dir.error[{#BACKUPDIR}].str(ok)}<>1 | + + +### Текущие графики в Discovery правиле: + +1. Pg_probackup: backup dir: {#BACKUPDIR} size + +Показывает 3 метрики с информацией о размерах каталогов с архивными копиями: + +| Метрика | Сторона графика | Описание | +| ------------------------------------------- | --------------- | -------------------------------------- | +| pg_probackup.dir.size[{#BACKUPDIR}] | (Left Side) | Общий размер каталогов /backups + /wal | +| pg_probackup.dir.size[{#BACKUPDIR}/backups] | (Left Side) | размер подкаталога /backups | +| pg_probackup.dir.size[{#BACKUPDIR}/wal] | (Right Side) | размер подкаталога /wal | + +2. Pg_probackup: backup dir: {#BACKUPDIR} duration + +Показывает 2 метрики с длительностью создания архивных копий: + +| Метрика | Сторона графика | Описание | +| -------------------------------------------- | --------------- | -------------------------------------------------------- | +| pg_probackup.dir.duration_full[{#BACKUPDIR}] | (Left Side) | Длительность в секундах создания полного бэкапа | +| pg_probackup.dir.duration_inc[{#BACKUPDIR}] | (Right Side) | Длительность в секундах создания инкрементального бэкапа | diff --git a/15/docker-pgprobackup/Dockerfile b/15/docker-pgprobackup/Dockerfile index 6ac1b6f..3bf6697 100644 --- a/15/docker-pgprobackup/Dockerfile +++ b/15/docker-pgprobackup/Dockerfile @@ -34,7 +34,7 @@ RUN apt-get update \ && mkdir -p $BACKUP_PATH \ && chown -R postgres:postgres $BACKUP_PATH \ # ... cleaning ... 
- && apt-get purge -y git* mariadb* make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ && apt-get -f install \ && apt-get -y autoremove \ && apt-get -y clean \ diff --git a/15/docker-pgprobackup/backup.sh b/15/docker-pgprobackup/backup.sh index 6fbe28d..5f7cb6a 100644 --- a/15/docker-pgprobackup/backup.sh +++ b/15/docker-pgprobackup/backup.sh @@ -25,13 +25,16 @@ if [ "$BACKUP_THREADS" = "" ]; then BACKUP_THREADS=4 fi -if [ "$BACKUP_MODE" = "" ]; then - BACKUP_MODE=page -fi - if [ "$DOW" = "6" ] ; then # make a full backup once a week (Saturday) - BACKUP_MODE=full + BACKUPMODE=full +else + # make an incremental backup on other days of the week + BACKUPMODE=page +fi +if [ "$BACKUP_MODE" != "" ]; then + # The backup creation mode is given forcibly + BACKUPMODE=$BACKUP_MODE fi if [ "$BACKUP_STREAM" = "" ]; then @@ -70,21 +73,33 @@ if ! [ -f $PGDATA/archive_active.trigger ] ; then touch $PGDATA/archive_active.trigger fi -if [[ "$IS_FULL" = "" || $BACKUP_MODE = "full" ]] ; then +if [[ "$IS_FULL" = "" || $BACKUPMODE = "full" ]] ; then echo "The initial backup must be type FULL ..." 
- /usr/bin/pg_probackup-$PG_MAJOR backup -d postgres --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + /usr/bin/pg_probackup-$PG_MAJOR backup -d postgres --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS else - # Backup type depends on day or input parameter - /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUP_MODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + if [[ $BACKUPMODE = "merge" ]]; then + # в этом режиме здесь всегда PAGE + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b page $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS + else + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUPMODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS + fi STATUS=`/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --format=json | jq -c '.[].backups[0].status'` LAST_STATE=${STATUS//'"'/''} if [[ "$LAST_STATE" = "CORRUPT" || "$LAST_STATE" = "ERROR" || "$LAST_STATE" = "ORPHAN" ]] ; then # You need to run a full backup, as an error occurred with incremental # Perhaps the loss of the segment at Failover ... 
- /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS fi fi +if [[ $BACKUPMODE = "merge" ]] ; then + # объединяем старые бэкапы в соответствии с настройками + /usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --merge-expired --no-validate --threads=$BACKUP_THREADS +else + # чистим старые бэкапы в соответствии с настройками + /usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --threads=$BACKUP_THREADS +fi + # collecting statistics on backups /usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt /usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --archive >> ~postgres/backups.txt diff --git a/15/docker-pgprocheckdb/Dockerfile b/15/docker-pgprocheckdb/Dockerfile index 4d7019a..69a8ed3 100644 --- a/15/docker-pgprocheckdb/Dockerfile +++ b/15/docker-pgprocheckdb/Dockerfile @@ -35,7 +35,7 @@ RUN apt-get update \ && mkdir -p $BACKUP_PATH \ && chown -R postgres:postgres $BACKUP_PATH \ # ... cleaning ... - && apt-get purge -y git* mariadb* make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ && apt-get -f install \ && apt-get -y autoremove \ && apt-get -y clean \ diff --git a/15/docker-pgprorestore/Dockerfile b/15/docker-pgprorestore/Dockerfile index eaded07..9afaba8 100644 --- a/15/docker-pgprorestore/Dockerfile +++ b/15/docker-pgprorestore/Dockerfile @@ -32,7 +32,7 @@ RUN apt-get update \ && mkdir -p $BACKUP_PATH \ && chown -R postgres:postgres $BACKUP_PATH \ # ... cleaning ... 
- && apt-get purge -y git* mariadb* make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ && apt-get -f install \ && apt-get -y autoremove \ && apt-get -y clean \ diff --git a/15/docker-pgupgrade/Dockerfile b/15/docker-pgupgrade/Dockerfile index 6d97b9b..41948af 100644 --- a/15/docker-pgupgrade/Dockerfile +++ b/15/docker-pgupgrade/Dockerfile @@ -4,7 +4,7 @@ # https://hub.docker.com/r/postgis/postgis # https://github.com/postgis/docker-postgis # -FROM postgres:15.5 +FROM postgres:15.6 LABEL maintainer="Sergey Grinko " ENV PG_MAJOR_OLD 14 @@ -29,7 +29,7 @@ ENV BACKUP_PATH /mnt/pgbak ENV POSTGRES_INITDB_ARGS "--locale=ru_RU.UTF8 --data-checksums" ENV RUM_VERSION 1.3.13 -RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 \ +RUN echo ru_RU.UTF-8 UTF-8 >> /etc/locale.gen; locale-gen \ && apt-get update \ && apt-get install -y --no-install-recommends ca-certificates jq wget freetds-dev freetds-common git make gcc postgresql-server-dev-$PG_MAJOR libicu-dev sendemail htop mc systemtap-sdt-dev vim \ && apt-cache showpkg postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR \ @@ -86,7 +86,7 @@ RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 && make USE_PGXS=1 install \ # ====== pg_variables && cd /tmp/build_ext \ - && git clone https://github.com/postgrespro/pg_variables \ + && git clone https://github.com/xinferum/pg_variables \ && cd pg_variables \ && make USE_PGXS=1 \ && make USE_PGXS=1 install \ @@ -166,12 +166,12 @@ RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 && PATH=$PATH make install \ # ====== pg_variables && cd /tmp/build_ext \ - && git clone https://github.com/postgrespro/pg_variables \ + && git clone https://github.com/xinferum/pg_variables \ && cd pg_variables \ && make USE_PGXS=1 \ && PATH=$PATH make USE_PGXS=1 install \ # ====== 
clean all unused package... - && apt-get purge -y git* mariadb* make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ && apt-get -f install \ && apt-get -y autoremove \ && apt-get -y clean \ diff --git a/15/docker-postgres/Dockerfile b/15/docker-postgres/Dockerfile index 480668e..91b9fa2 100644 --- a/15/docker-postgres/Dockerfile +++ b/15/docker-postgres/Dockerfile @@ -4,7 +4,7 @@ # https://hub.docker.com/r/postgis/postgis # https://github.com/postgis/docker-postgis # -FROM postgres:15.5 +FROM postgres:15.6 LABEL maintainer="Sergey Grinko " @@ -16,7 +16,7 @@ ENV BACKUP_PATH /mnt/pgbak ENV POSTGRES_INITDB_ARGS "--locale=ru_RU.UTF8 --data-checksums" ENV RUM_VERSION 1.3.13 -RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 \ +RUN echo ru_RU.UTF-8 UTF-8 >> /etc/locale.gen; locale-gen \ && apt-get update \ && apt-get install -y --no-install-recommends ca-certificates jq wget freetds-dev freetds-common git make gcc postgresql-server-dev-$PG_MAJOR libicu-dev sendemail htop mc systemtap-sdt-dev vim \ # подключаем репозитарий архивной утилиты @@ -64,7 +64,7 @@ RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 && make USE_PGXS=1 install \ # ====== pg_variables && cd /tmp/build_ext \ - && git clone https://github.com/postgrespro/pg_variables \ + && git clone https://github.com/xinferum/pg_variables \ && cd pg_variables \ && make USE_PGXS=1 \ && make USE_PGXS=1 install \ @@ -102,7 +102,7 @@ RUN localedef -i ru_RU -c -f UTF-8 -A /usr/share/locale/locale.alias ru_RU.UTF-8 && cd / \ && ln -s /usr/share/postgresql/$PG_MAJOR/tsearch_data /usr/share/postgresql/ \ # ====== clean all unused package... 
- && apt-get purge -y git* mariadb* make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ && apt-get -f install \ && apt-get -y autoremove \ && apt-get -y clean \ diff --git a/15/docker-postgres/backup.sh b/15/docker-postgres/backup.sh index 5d4d510..f82006f 100644 --- a/15/docker-postgres/backup.sh +++ b/15/docker-postgres/backup.sh @@ -30,10 +30,6 @@ if [ "$BACKUP_THREADS" = "" ]; then BACKUP_THREADS=4 fi -if [ "$BACKUP_MODE" = "" ]; then - BACKUP_MODE=page -fi - if [ "$BACKUP_STREAM" = "" ]; then BACKUP_STREAM="stream" fi @@ -51,15 +47,14 @@ fi if [ "$DOW" = "6" ] ; then # make a full backup once a week (Saturday) - BACKUP_MODE=full + BACKUPMODE=full else # make an incremental backup on other days of the week - BACKUP_MODE=page + BACKUPMODE=page fi - -if [ "$1" != "" ]; then +if [ "$BACKUP_MODE" != "" ]; then # The backup creation mode is given forcibly - BACKUP_MODE=$1 + BACKUPMODE=$BACKUP_MODE fi BACKUP_STREAM="--stream" @@ -75,7 +70,6 @@ if [ "$3" != "" ]; then BACKUP_THREADS=$3 fi - cd $BACKUP_PATH COUNT_DIR=`ls -l $BACKUP_PATH | grep "^d" | wc -l` @@ -98,21 +92,33 @@ if ! 
[ -f $PGDATA/archive_active.trigger ] ; then su - postgres -c "touch $PGDATA/archive_active.trigger" fi -if [[ "$IS_FULL" = "" || $BACKUP_MODE = "full" ]] ; then +if [[ "$IS_FULL" = "" || $BACKUPMODE = "full" ]] ; then # Full backup needs to be forcibly - su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" else # Backup type depends on day or input parameter - su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUP_MODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + if [[ $BACKUPMODE = "merge" ]]; then + # в этом режиме здесь всегда PAGE + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b page $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" + else + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUPMODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" + fi STATUS=`su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --format=json | jq -c '.[].backups[0].status'"` LAST_STATE=${STATUS//'"'/''} if [[ "$LAST_STATE" = "CORRUPT" || "$LAST_STATE" = "ERROR" || "$LAST_STATE" = "ORPHAN" ]] ; then # You need to run a full backup, as an error occurred with incremental # Perhaps the loss of the segment at Failover ...
- su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS --delete-expired --delete-wal" + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" fi fi +if [[ $BACKUPMODE = "merge" ]] ; then + # объединяем старые бэкапы в соответствии с настройками + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --merge-expired --no-validate --threads=$BACKUP_THREADS" +else + # чистим старые бэкапы в соответствии с настройками + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --threads=$BACKUP_THREADS" +fi # collecting statistics on backups su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt" diff --git a/15/docker-postgres/sql/user_lookup.sql b/15/docker-postgres/sql/user_lookup.sql index 80e1f47..43e1a1f 100644 --- a/15/docker-postgres/sql/user_lookup.sql +++ b/15/docker-postgres/sql/user_lookup.sql @@ -1,9 +1,9 @@ -CREATE OR REPLACE FUNCTION pgbouncer.user_lookup(p_username text, OUT uname text, OUT phash text) RETURNS record - LANGUAGE plpgsql SECURITY DEFINER - AS $$ -BEGIN - SELECT usename, passwd FROM pg_catalog.pg_shadow - WHERE usename = p_username INTO uname, phash; - RETURN; -END; -$$; +CREATE OR REPLACE FUNCTION pgbouncer.user_lookup(p_username text, OUT uname text, OUT phash text) RETURNS record + LANGUAGE plpgsql SECURITY DEFINER + AS $$ +BEGIN + SELECT usename, passwd FROM pg_catalog.pg_shadow + WHERE usename = p_username INTO uname, phash; + RETURN; +END; +$$; diff --git a/15/docker_start.sh b/15/docker_start.sh index c157d06..69f4b8b 100644 --- a/15/docker_start.sh +++ b/15/docker_start.sh @@ -9,5 +9,5 @@ docker run --rm --name 
my_postgres_15 --shm-size 2147483648 -p 5433:5432/tcp --s -v /mnt/pgbak2/:/mnt/pgbak \ -v /usr/share/postgres/15_1/tsearch_data:/usr/share/postgresql/tsearch_data \ -e POSTGRES_PASSWORD=postgres -e POSTGRES_HOST_AUTH_METHOD=trust -e DEPLOY_PASSWORD=postgres -e PGBOUNCER_PASSWORD=postgres -e TZ="Etc/UTC" \ - grufos/postgres:15.5 \ + grufos/postgres:15.6 \ -c shared_preload_libraries="plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm" -c shared_ispell.max_size=70MB diff --git a/15/postgres-pgupgrade.yml b/15/postgres-pgupgrade.yml index a29d3a0..5a4fb94 100644 --- a/15/postgres-pgupgrade.yml +++ b/15/postgres-pgupgrade.yml @@ -3,7 +3,7 @@ services: pgupgrade: -# image: grufos/pgupgrade:15.5 +# image: grufos/pgupgrade:15.6 build: context: ./docker-pgupgrade dockerfile: Dockerfile diff --git a/15/postgres-service.yml b/15/postgres-service.yml index e6f7a07..e28bb13 100644 --- a/15/postgres-service.yml +++ b/15/postgres-service.yml @@ -3,7 +3,7 @@ services: postgres: -# image: grufos/postgres:15.5 +# image: grufos/postgres:15.6 build: context: ./docker-postgres dockerfile: Dockerfile @@ -35,5 +35,5 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" diff --git a/15/postgres-service_all.yml b/15/postgres-service_all.yml index b25577c..9a3b999 100644 --- a/15/postgres-service_all.yml +++ b/15/postgres-service_all.yml @@ -3,7 +3,7 @@ services: postgres: -# image: grufos/postgres:15.5 +# image: grufos/postgres:15.6 build: context: ./docker-postgres dockerfile: Dockerfile @@ -35,10 +35,10 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" pgbouncer: -# image: grufos/pgbouncer:1.17.0 +# image: grufos/pgbouncer:1.22.0 build: context: ./docker-pgbouncer dockerfile: Dockerfile diff --git a/15/postgres-service_pgb.yml 
b/15/postgres-service_pgb.yml index cab9de7..27c9e30 100644 --- a/15/postgres-service_pgb.yml +++ b/15/postgres-service_pgb.yml @@ -3,7 +3,7 @@ services: postgres: -# image: grufos/postgres:15.5 +# image: grufos/postgres:15.6 build: context: ./docker-postgres dockerfile: Dockerfile @@ -35,10 +35,10 @@ services: EMAIL_SERVER: "mail.company.ru" EMAIL_HOSTNAME: "noreplay@myhost.ru" BACKUP_THREADS: "4" - BACKUP_MODE: "page" + BACKUP_MODE: "" pgbouncer: -# image: grufos/pgbouncer:1.17.0 +# image: grufos/pgbouncer:1.22.0 build: context: ./docker-pgbouncer dockerfile: Dockerfile diff --git a/16/analyze-service.yml b/16/analyze-service.yml new file mode 100644 index 0000000..caeceb4 --- /dev/null +++ b/16/analyze-service.yml @@ -0,0 +1,23 @@ +version: '3.5' +services: + + analyze: + build: + context: ./docker-analyze + dockerfile: Dockerfile + + volumes: + - "/var/log/postgresql1:/var/log/postgresql" + - "/var/log/pgbouncer1:/var/log/pgbouncer" + - "/var/log/mamonsu1:/var/log/mamonsu" + + environment: + STAT_STATEMENTS: "true" +# TZ: "Etc/UTC" + TZ: "Europe/Moscow" + PGPASSWORD: qweasdzxc +# PGHOST: 10.10.2.139 + PGHOST: postgres + PGPORT: 5432 + PGBHOST: pgbouncer + PGBPORT: 6432 diff --git a/16/backup-service.yml b/16/backup-service.yml new file mode 100644 index 0000000..a05fb0a --- /dev/null +++ b/16/backup-service.yml @@ -0,0 +1,24 @@ +version: '3.5' +services: + + pgprobackup: + build: + context: ./docker-pgprobackup + dockerfile: Dockerfile + + volumes: + - "/var/lib/pgsql/16_1/data:/var/lib/postgresql/data" + - "/mnt/pgbak2/:/mnt/pgbak/" + + environment: + TZ: "Europe/Moscow" + PGHOST: postgres + PGPORT: 5432 +# PGUSER: "postgres" +# PGPASSWORD: "qweasdzxc" + EMAILTO: "DBA-PostgreSQL@company.ru" + EMAIL_SERVER: "mail.company.ru" + EMAIL_HOSTNAME: "noreplay@myhost.ru" + BACKUP_THREADS: "4" + BACKUP_MODE: "" + BACKUP_STREAM: "yes" diff --git a/16/bin/analyze_start.sh b/16/bin/analyze_start.sh new file mode 100644 index 0000000..2c492ea --- /dev/null +++ 
b/16/bin/analyze_start.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# создаём все необходимые каталоги +mkdir -p /mnt/pgbak2 /var/log/postgresql1 /var/log/pgbouncer1 /var/log/mamonsu1 /var/lib/pgsql/16_1 /usr/share/postgres/16_1/tsearch_data +chown 999:999 /var/log/postgresql1 /var/lib/pgsql/16_1 /var/log/pgbouncer1 /var/log/mamonsu1 /mnt/pgbak2 /usr/share/postgres/16_1 /usr/share/postgres/16_1/tsearch_data +clear +# запускаем сборку +docker-compose -f "analyze-service.yml" up --build "$@" diff --git a/16/bin/backup_start.sh b/16/bin/backup_start.sh new file mode 100644 index 0000000..f98f635 --- /dev/null +++ b/16/bin/backup_start.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# создаём все необходимые каталоги +mkdir -p /mnt/pgbak2 /var/log/postgresql1 /var/log/pgbouncer1 /var/log/mamonsu1 /var/lib/pgsql/16_1 /usr/share/postgres/16_1/tsearch_data +chown 999:999 /var/log/postgresql1 /var/lib/pgsql/16_1 /var/log/pgbouncer1 /var/log/mamonsu1 /mnt/pgbak2 /usr/share/postgres/16_1 /usr/share/postgres/16_1/tsearch_data +clear +# запускаем сборку +docker-compose -f "backup-service.yml" up --build "$@" diff --git a/16/bin/check_cluster_start.sh b/16/bin/check_cluster_start.sh new file mode 100644 index 0000000..af4b409 --- /dev/null +++ b/16/bin/check_cluster_start.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# создаём все необходимые каталоги +mkdir -p /mnt/pgbak2 /var/log/postgresql1 /var/log/pgbouncer1 /var/log/mamonsu1 /var/lib/pgsql/16_1 /usr/share/postgres/16_1/tsearch_data +chown 999:999 /var/log/postgresql1 /var/lib/pgsql/16_1 /var/log/pgbouncer1 /var/log/mamonsu1 /mnt/pgbak2 /usr/share/postgres/16_1 /usr/share/postgres/16_1/tsearch_data +clear +# запускаем сборку +docker-compose -f "check_cluster_service.yml" up --build "$@" diff --git a/16/bin/clear_all_docker.sh b/16/bin/clear_all_docker.sh new file mode 100644 index 0000000..9d06db3 --- /dev/null +++ b/16/bin/clear_all_docker.sh @@ -0,0 +1,6 @@ +#!/bin/bash +# docker system prune -a # чистка всех образов в каталоге /var/lib/docker/overlay2 +docker 
stop $(docker ps -q) +docker rm -v $(docker ps -aq -f status=exited) +docker rmi $(docker image ls -q) -f +docker rmi $(docker image ls -q) -f diff --git a/16/bin/docker_build.sh b/16/bin/docker_build.sh new file mode 100644 index 0000000..1178118 --- /dev/null +++ b/16/bin/docker_build.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# +# получает через пробел имена контейнеров для сборки. Если не указано, то принимается такая строка: +# pgbouncer postgres pgupgrade analyze mamonsu pgprobackup pgprorestore pgprocheckdb +# +VERSION=16 + +set -euo pipefail + +if [[ $# -ne 0 ]]; then + LISTDOCKER=$@ +else + LISTDOCKER="pgbouncer postgres pgupgrade analyze mamonsu pgprobackup pgprorestore pgprocheckdb" +fi + +for param in $LISTDOCKER +do + cd docker-$param + dir=`pwd` + echo "" + echo "=====================================" + echo " $dir" + echo "=====================================" + echo "" + docker build --no-cache . -t ${VERSION}_$param:latest + cd .. +done + +docker image ls --all diff --git a/16/bin/docker_start.sh b/16/bin/docker_start.sh new file mode 100644 index 0000000..db715a9 --- /dev/null +++ b/16/bin/docker_start.sh @@ -0,0 +1,9 @@ +#!/bin/bash +docker run -p 127.0.0.1:5433:5432/tcp --shm-size 2147483648 \ + -e POSTGRES_PASSWORD=postgres \ + -e POSTGRES_HOST_AUTH_METHOD=trust \ + -e DEPLOY_PASSWORD=postgres \ + -e TZ="Etc/UTC" \ + grufos/postgres:16.2 \ + -c shared_preload_libraries="plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm" \ + -c shared_ispell.max_size=70MB diff --git a/16/bin/harbor_push.sh b/16/bin/harbor_push.sh new file mode 100644 index 0000000..f43b3ea --- /dev/null +++ b/16/bin/harbor_push.sh @@ -0,0 +1,43 @@ +#!/bin/bash +VERSION=16 +MINOR=2 +VERS_BOUNCER="1.22.0" +VERS_PROBACKUP="2.5.13" +VERS_MAMONSU="3.5.5" +PROJECT=dba_postgres +URL=harbor.company.ru +ACCOUNT="${URL}/${PROJECT}" +LATEST_PUSH='yes' + +set -euo pipefail + +if [[ $# -ne 0 ]]; then + LISTDOCKER=$@ +else + 
LISTDOCKER="pgbouncer postgres pgupgrade analyze mamonsu pgprobackup pgprorestore pgprocheckdb" +fi + +for param in $LISTDOCKER +do + if [ "$param" = "pgbouncer" ]; then + vers="${VERS_BOUNCER}" + elif [ "$param" = "mamonsu" ]; then + vers="${VERSION}_${VERS_MAMONSU}" + elif [[ "$param" = "pgprobackup" || $param = "pgprorestore" || $param = "pgprocheckdb" ]]; then + vers="${VERSION}.${MINOR}_${VERS_PROBACKUP}" + else + vers="${VERSION}.${MINOR}" + fi + echo "=======================" + echo "${param} -> ${vers}" + echo "=======================" + if ! docker image ls | grep "${ACCOUNT}/${param}" ; then + echo " push ..." + docker tag ${VERSION}_${param}:latest ${ACCOUNT}/${param}:latest + if [ "$LATEST_PUSH" = "yes" ]; then + docker push ${ACCOUNT}/${param}:latest + fi + docker tag ${ACCOUNT}/${param}:latest ${ACCOUNT}/${param}:${vers} + docker push ${ACCOUNT}/${param}:${vers} + fi +done diff --git a/16/bin/hub_push.sh b/16/bin/hub_push.sh new file mode 100644 index 0000000..f4fc24a --- /dev/null +++ b/16/bin/hub_push.sh @@ -0,0 +1,41 @@ +#!/bin/bash +VERSION=16 +MINOR=2 +VERS_BOUNCER="1.22.0" +VERS_PROBACKUP="2.5.13" +VERS_MAMONSU="3.5.5" +ACCOUNT=grufos +LATEST_PUSH='yes' + +set -euo pipefail + +if [[ $# -ne 0 ]]; then + LISTDOCKER=$@ +else + LISTDOCKER="pgbouncer postgres pgupgrade analyze mamonsu pgprobackup pgprorestore pgprocheckdb" +fi + +for param in $LISTDOCKER +do + if [ "$param" = "pgbouncer" ]; then + vers="${VERS_BOUNCER}" + elif [ "$param" = "mamonsu" ]; then + vers="${VERSION}_${VERS_MAMONSU}" + elif [[ "$param" = "pgprobackup" || $param = "pgprorestore" || $param = "pgprocheckdb" ]]; then + vers="${VERSION}.${MINOR}_${VERS_PROBACKUP}" + else + vers="${VERSION}.${MINOR}" + fi + echo "=======================" + echo "${param} -> ${vers}" + echo "=======================" + if ! docker image ls | grep "${ACCOUNT}/${param}" ; then + echo " push ..." 
+ docker tag ${VERSION}_${param}:latest ${ACCOUNT}/${param}:latest + if [ "$LATEST_PUSH" = "yes" ]; then + docker push ${ACCOUNT}/${param}:latest + fi + docker tag ${ACCOUNT}/${param}:latest ${ACCOUNT}/${param}:${vers} + docker push ${ACCOUNT}/${param}:${vers} + fi +done diff --git a/16/bin/postgres_start.sh b/16/bin/postgres_start.sh new file mode 100644 index 0000000..6f7b0b8 --- /dev/null +++ b/16/bin/postgres_start.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# создаём все необходимые каталоги +mkdir -p /mnt/pgbak2 /var/log/postgresql1 /var/log/pgbouncer1 /var/log/mamonsu1 /var/lib/pgsql/16_1 /usr/share/postgres/16_1/tsearch_data +chown 999:999 /var/log/postgresql1 /var/lib/pgsql/16_1 /var/log/pgbouncer1 /var/log/mamonsu1 /mnt/pgbak2 /usr/share/postgres/16_1 /usr/share/postgres/16_1/tsearch_data +clear +# запускаем сборку +rm -rf /var/log/postgresql1/* +docker-compose -f "postgres-service.yml" up --build "$@" diff --git a/16/bin/postgres_start_all.sh b/16/bin/postgres_start_all.sh new file mode 100644 index 0000000..183f08b --- /dev/null +++ b/16/bin/postgres_start_all.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# создаём все необходимые каталоги +mkdir -p /mnt/pgbak2 /var/log/postgresql1 /var/log/pgbouncer1 /var/log/mamonsu1 /var/lib/pgsql/16_1 /usr/share/postgres/16_1/tsearch_data +chown 999:999 /var/log/postgresql1 /var/lib/pgsql/16_1 /var/log/pgbouncer1 /var/log/mamonsu1 /mnt/pgbak2 /usr/share/postgres/16_1 /usr/share/postgres/16_1/tsearch_data +clear +# запускаем сборку +rm -rf /var/log/pgbouncer1/* +rm -rf /var/log/postgresql1/* +rm -rf /var/log/mamonsu1/* +rm -rf /etc/pgbouncer1/* +docker-compose -f "postgres-service_all.yml" up --build "$@" diff --git a/16/bin/postgres_start_pgb.sh b/16/bin/postgres_start_pgb.sh new file mode 100644 index 0000000..8112a43 --- /dev/null +++ b/16/bin/postgres_start_pgb.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# создаём все необходимые каталоги +mkdir -p /mnt/pgbak2 /var/log/postgresql1 /var/log/pgbouncer1 /var/log/mamonsu1 /var/lib/pgsql/16_1 
/usr/share/postgres/16_1/tsearch_data +chown 999:999 /var/log/postgresql1 /var/lib/pgsql/16_1 /var/log/pgbouncer1 /var/log/mamonsu1 /mnt/pgbak2 /usr/share/postgres/16_1 /usr/share/postgres/16_1/tsearch_data +clear +# запускаем сборку +rm -rf /var/log/pgbouncer1/* +rm -rf /var/log/postgresql1/* +rm -rf /var/log/mamonsu1/* +rm -rf /etc/pgbouncer1/* +docker-compose -f "postgres-service_pgb.yml" up --build "$@" diff --git a/16/bin/restore_start.sh b/16/bin/restore_start.sh new file mode 100644 index 0000000..3c4f78f --- /dev/null +++ b/16/bin/restore_start.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# создаём все необходимые каталоги +mkdir -p /mnt/pgbak2 /var/log/postgresql1 /var/log/pgbouncer1 /var/log/mamonsu1 /var/lib/pgsql/16_1 /usr/share/postgres/16_1/tsearch_data +chown 999:999 /var/log/postgresql1 /var/lib/pgsql/16_1 /var/log/pgbouncer1 /var/log/mamonsu1 /mnt/pgbak2 /usr/share/postgres/16_1 /usr/share/postgres/16_1/tsearch_data +clear +# запускаем сборку +docker-compose -f "restore-service.yml" up --build "$@" diff --git a/16/bin/show_start.sh b/16/bin/show_start.sh new file mode 100644 index 0000000..02b3214 --- /dev/null +++ b/16/bin/show_start.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# создаём все необходимые каталоги +mkdir -p /mnt/pgbak2 /var/log/postgresql1 /var/log/pgbouncer1 /var/log/mamonsu1 /var/lib/pgsql/16_1 /usr/share/postgres/16_1/tsearch_data +chown 999:999 /var/log/postgresql1 /var/lib/pgsql/16_1 /var/log/pgbouncer1 /var/log/mamonsu1 /mnt/pgbak2 /usr/share/postgres/16_1 /usr/share/postgres/16_1/tsearch_data +clear +# запускаем сборку +docker-compose -f "show_backup-service.yml" up --build "$@" diff --git a/16/bin/upgrade_start.sh b/16/bin/upgrade_start.sh new file mode 100644 index 0000000..7ec8212 --- /dev/null +++ b/16/bin/upgrade_start.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# создаём все необходимые каталоги +SRC="15" +DEST="16" +mkdir -p /mnt/pgbak2 /var/log/postgresql1 /var/log/mamonsu1 /var/lib/pgsql/${DEST}_1 /usr/share/postgres/${DEST}_1 +chown 999:999 
/mnt/pgbak2 /var/log/postgresql1 /var/log/mamonsu1 /var/lib/pgsql/${DEST}_1 /usr/share/postgres/${DEST}_1 +rm -rf /usr/share/postgres/${DEST}_1/* +rm -rf /var/lib/pgsql/${DEST}_1/* +mkdir -p /var/lib/pgsql/${DEST}_1/${DEST} /var/lib/pgsql/${DEST}_1/$SRC /usr/share/postgres/${DEST}_1/tsearch_data +chown 999:999 /var/lib/pgsql/${DEST}_1/${DEST} /var/lib/pgsql/${DEST}_1/$SRC /usr/share/postgres/${DEST}_1/tsearch_data +cp -rpf /var/lib/pgsql/${SRC}_1/* /var/lib/pgsql/${DEST}_1/$SRC +clear +# запускаем сборку +docker-compose -f "postgres-pgupgrade.yml" up --build "$@" diff --git a/16/check_cluster_service.yml b/16/check_cluster_service.yml new file mode 100644 index 0000000..f36d805 --- /dev/null +++ b/16/check_cluster_service.yml @@ -0,0 +1,27 @@ +version: '3.5' +services: + + pgprocheckdb: + build: + context: ./docker-pgprocheckdb + dockerfile: Dockerfile + + volumes: + - "/var/log/postgresql1:/var/log/postgresql" + - "/var/lib/pgsql/16_1/data:/var/lib/postgresql/data" + - "/mnt/pgbak2/:/mnt/pgbak/" + + environment: + TZ: "Europe/Moscow" + EMAILTO: "DBA-PostgreSQL@company.ru" + EMAIL_SERVER: "mail.company.ru" + EMAIL_HOSTNAME: "noreplay@myhost.ru" + EMAIL_SEND: "no" + PGHOST: "postgres" + PGPORT: "5432" + PGUSER: "postgres" + PGPASSWORD: "qweasdzxc" + BACKUP_THREADS: "4" + AMCHECK: "true" + HEAPALLINDEXED: "true" + diff --git a/16/docker-analyze/Dockerfile b/16/docker-analyze/Dockerfile new file mode 100644 index 0000000..9103523 --- /dev/null +++ b/16/docker-analyze/Dockerfile @@ -0,0 +1,47 @@ +# Based on: +# https://hub.docker.com/_/debian +# +FROM debian:bookworm-slim + +LABEL maintainer="Sergey Grinko " + +ENV DEBIAN_RELEASE bookworm +ENV PG_MAJOR 16 +ENV BACKUP_PATH /mnt/pgbak + +# explicitly set user/group IDs +RUN set -eux; \ + groupadd -r postgres --gid=999; \ + useradd -r -g postgres --uid=999 --home-dir=/var/lib/postgresql --shell=/bin/bash postgres; \ + mkdir -p /var/lib/postgresql/data; + +RUN apt-get update \ + && apt-get install -y 
--no-install-recommends ca-certificates wget gnupg sendemail bzip2 pgbadger \ + # ... install psql ... + && echo "deb http://apt.postgresql.org/pub/repos/apt $DEBIAN_RELEASE-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ + && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + && apt-get update \ + && apt-get install -y --no-install-recommends postgresql-client-$PG_MAJOR \ + # ... create dirs ... + && mkdir -p /var/log/postgresql/report \ + && mkdir -p /var/log/pgbouncer \ + && mkdir -p /var/log/mamonsu \ + # ... cleaning ... + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get -f install \ + && apt-get -y autoremove \ + && apt-get -y clean \ + && apt-get -y autoclean \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /var/cache/apt/* + +COPY ./analyze_log.sh /usr/local/bin +COPY ./pg_stat_statements_report.sql /var/lib/postgresql + +RUN chown postgres:postgres /var/lib/postgresql/*.sql \ + && chown -R postgres:postgres /var/log/postgresql/report \ + && chown -R postgres:postgres /var/log/pgbouncer \ + && chown -R postgres:postgres /var/log/mamonsu \ + && chmod +x /usr/local/bin/*.sh + +USER postgres +ENTRYPOINT [ "/usr/local/bin/analyze_log.sh" ] diff --git a/16/docker-analyze/analyze_log.sh b/16/docker-analyze/analyze_log.sh new file mode 100644 index 0000000..ca572e4 --- /dev/null +++ b/16/docker-analyze/analyze_log.sh @@ -0,0 +1,100 @@ +#!/bin/bash +curr_date=`date -d "-1 day" +%F` +echo "Date of processing: $curr_date" + +echo "# read names DBs ..." +DB_ALL=`psql -h ${PGHOST:-127.0.0.1} -p ${PGPORT:-5432} -XtqA -c "select string_agg(datname,' ') from pg_database where not datistemplate and datname not in ('postgres','mamonsu') limit 1;"` +VERSION=$PG_MAJOR + +# check paths... 
+PGLOG=/var/log/postgresql +INSTANCE=data +CLUSTER=$VERSION/$INSTANCE +SCRIPT_PATH=/var/lib/postgresql +REPORT_PATH=$PGLOG/report +PGDATA=/var/lib/postgresql/$CLUSTER + +if [ "$STAT_STATEMENTS" = "" ]; then + STAT_STATEMENTS="false" +fi + +mkdir -p $REPORT_PATH + +if [ "$STAT_STATEMENTS" = "true" ]; then + echo "# take statistic on duration...." + for DB in $DB_ALL ; do + echo "TOP duration statements on DB: $DB" + echo "-- ======================================================================================================== --" >> $REPORT_PATH/${curr_date}_${DB}_report.txt + echo "" >> $REPORT_PATH/${curr_date}_${DB}_report.txt + echo "TOP duration statements on DB: $DB" >> $REPORT_PATH/${curr_date}_${DB}_report.txt + echo "" >> $REPORT_PATH/${curr_date}_${DB}_report.txt + echo "psql -h ${PGHOST:-127.0.0.1} -p ${PGPORT:-5432} -f $SCRIPT_PATH/pg_stat_statements_report.sql -qt $DB" + psql -h ${PGHOST:-127.0.0.1} -p ${PGPORT:-5432} -f $SCRIPT_PATH/pg_stat_statements_report.sql -qt $DB >> $REPORT_PATH/${curr_date}_${DB}_report.txt + bzip2 -f -9 $REPORT_PATH/${curr_date}_${DB}_report.txt + done +fi + +if [ ! -f $PGLOG/postgresql-$VERSION-${curr_date}_000000.log ]; then + touch $PGLOG/postgresql-$VERSION-${curr_date}_000000.log +fi +# merge lot LOG files into one file +for file in $( ls $PGLOG/postgresql-$VERSION-${curr_date}*.log ) +do + echo "# Procesing: $file ..." + if [ $file != $PGLOG/postgresql-$VERSION-${curr_date}_000000.log ]; then + cat $file >> $PGLOG/postgresql-$VERSION-${curr_date}_000000.log + rm $file + fi +done + +echo "# take pgbadger statistics ..." +cd $REPORT_PATH +LOG_LINE_PREFIX=`psql -h ${PGHOST:-127.0.0.1} -p ${PGPORT:-5432} -XtqA -c "select setting from pg_settings where name ~ 'log_line_prefix';"` +if [ -f $PGLOG/postgresql-$VERSION-${curr_date}_000000.log ]; then + echo "# create statistics from logs ..." 
+ pgbadger -f stderr --quiet --prefix "$LOG_LINE_PREFIX" --outfile ${curr_date}_pgbadger.html $PGLOG/postgresql-$VERSION-${curr_date}_000000.log + echo "# archiving..." + bzip2 -f -9 $REPORT_PATH/${curr_date}_pgbadger.html + bzip2 -f -9 $PGLOG/postgresql-$VERSION-${curr_date}_000000.log +fi + +echo "# Clean the old archives of logs .... Store the last 30 files..." +ls -t $PGLOG/postgresql*.log.bz2 | tail -n +31 | xargs -I{} rm {} +echo "# Clean the old report archives .... Store the last 30 files..." +ls -t $REPORT_PATH/*.html.bz2 | tail -n +31 | xargs -I{} rm {} +ls -t $REPORT_PATH/*.txt.bz2 | tail -n +31 | xargs -I{} rm {} + +if [ "$STAT_STATEMENTS" = "true" ]; then + echo "# reset sql statements statistic ...." + for DB in $DB_ALL ; do + echo "exec pg_stat_statements_reset() on DB: $DB" + psql -qt -h ${PGHOST:-127.0.0.1} -p ${PGPORT:-5432} -c "select pg_stat_statements_reset();" $DB + done +fi + +DAY=`date -d "-1 day" +%a` + +if [ -f /var/log/mamonsu/mamonsu.log ]; then + echo "" + echo "# -- =========================== --" + echo "# mamonsu.log -> mamonsu.log.$DAY.bz ..." + mv -f /var/log/mamonsu/mamonsu.log /var/log/mamonsu/mamonsu.log.$DAY + bzip2 -f -9 /var/log/mamonsu/mamonsu.log.$DAY + echo "# please restart" + echo "# container mamonsu..." + echo "# -- =========================== --" +fi +if [ -f /var/log/pgbouncer/pgbouncer.log ]; then + echo "" + echo "# -- =========================== --" + echo "# pgbouncer.log -> pgbouncer.log.$DAY.bz ..." + mv -f /var/log/pgbouncer/pgbouncer.log /var/log/pgbouncer/pgbouncer.log.$DAY + bzip2 -f -9 /var/log/pgbouncer/pgbouncer.log.$DAY + echo "# -- =========================== --" +fi +echo "" +echo "# -- =========================== --" +echo "# please send HUP signal" +echo "# to container pgbouncer..." 
+echo "# -- =========================== --" + diff --git a/16/docker-analyze/pg_stat_statements_report.sql b/16/docker-analyze/pg_stat_statements_report.sql new file mode 100644 index 0000000..e0b2fc7 --- /dev/null +++ b/16/docker-analyze/pg_stat_statements_report.sql @@ -0,0 +1,172 @@ +--Slowest Queries Report (requires pg_stat_statements) + +--Original version – Data Egret: https://github.com/dataegret/pg-utils/blob/master/sql/global_reports/query_stat_total.sql +with pg_stat_statements_slice as ( + select * + from pg_stat_statements + -- if current database is postgres then generate report for all databases, + -- otherwise generate for current database only + where + current_database() = 'postgres' + or dbid = ( + select oid + from pg_database + where datname = current_database() + ) +), pg_stat_statements_normalized as ( + select + *, + translate( + regexp_replace( + regexp_replace( + regexp_replace( + regexp_replace( + query, + e'\\?(::[a-zA-Z_]+)?( *, *\\?(::[a-zA-Z_]+)?)+', '?', 'g' + ), + e'\\$[0-9]+(::[a-zA-Z_]+)?( *, *\\$[0-9]+(::[a-zA-Z_]+)?)*', '$N', 'g' + ), + e'--.*$', '', 'ng' + ), + e'/\\*.*?\\*/', '', 'g' + ), + e'\r', '' + ) as query_normalized + from pg_stat_statements_slice +), totals as ( + select + sum(total_exec_time) as total_exec_time, + sum(blk_read_time+blk_write_time) as io_time, + sum(total_exec_time-blk_read_time-blk_write_time) as cpu_time, + sum(calls) as ncalls, + sum(rows) as total_rows + from pg_stat_statements_slice +), _pg_stat_statements as ( + select + (select datname from pg_database where oid = p.dbid) as database, + (select rolname from pg_roles where oid = p.userid) as username, + --select shortest query, replace \n\n-- strings to avoid email clients format text as footer + substring( + translate( + replace( + (array_agg(query order by length(query)))[1], + e'-- \n', + e'--\n' + ), + e'\r', '' + ), + 1, + 8192 + ) as query, + sum(total_exec_time) as total_exec_time, + sum(blk_read_time) as blk_read_time, 
sum(blk_write_time) as blk_write_time, + sum(calls) as calls, + sum(rows) as rows, + min(min_exec_time) as min_exec_time, + max(max_exec_time) as max_exec_time, + avg(stddev_exec_time) as stddev_exec_time + from pg_stat_statements_normalized p + group by dbid, userid, md5(query_normalized) +), totals_readable as ( + select + to_char(interval '1 millisecond' * total_exec_time, 'HH24:MI:SS') as total_exec_time, + (100*io_time/total_exec_time)::numeric(20,2) as io_time_percent, + to_char(ncalls, 'FM999,999,999,990') as total_queries, + (select to_char(count(distinct md5(query)), 'FM999,999,990') from _pg_stat_statements) as unique_queries + from totals +), statements as ( + select + (100*total_exec_time/(select total_exec_time from totals)) as time_percent, + (100*(blk_read_time+blk_write_time)/(select greatest(io_time, 1) from totals)) as io_time_percent, + (100*(total_exec_time-blk_read_time-blk_write_time)/(select cpu_time from totals)) as cpu_time_percent, + to_char(interval '1 millisecond' * total_exec_time, 'HH24:MI:SS') as total_exec_time, + (total_exec_time::numeric/calls)::numeric(20,2) as avg_time, + ((total_exec_time-blk_read_time-blk_write_time)::numeric/calls)::numeric(20, 2) as avg_cpu_time, + ((blk_read_time+blk_write_time)::numeric/calls)::numeric(20, 2) as avg_io_time, + to_char(calls, 'FM999,999,999,990') as calls, + (100*calls/(select ncalls from totals))::numeric(20, 2) as calls_percent, + to_char(rows, 'FM999,999,999,990') as rows, + (100*rows/(select total_rows from totals))::numeric(20, 2) as row_percent, + min_exec_time::numeric(20, 2) as min_time, + max_exec_time::numeric(20, 2) as max_time, + stddev_exec_time::numeric(20, 2) as stddev_time, + database, + username, + query + from _pg_stat_statements + where + (total_exec_time-blk_read_time-blk_write_time)/(select cpu_time from totals) >= 0.01 + or (blk_read_time+blk_write_time)/( + select greatest(io_time, 1) from totals + ) >= 0.01 + or calls/(select ncalls from totals) >= 0.02 + or 
rows/(select total_rows from totals) >= 0.02 + union all + select + (100*sum(total_exec_time)::numeric/(select total_exec_time from totals)) as time_percent, + (100*sum(blk_read_time+blk_write_time)::numeric/(select greatest(io_time, 1) from totals)) as io_time_percent, + (100*sum(total_exec_time-blk_read_time-blk_write_time)::numeric/(select cpu_time from totals)) as cpu_time_percent, + to_char(interval '1 millisecond' * sum(total_exec_time), 'HH24:MI:SS') as total_exec_time, + (sum(total_exec_time)::numeric/sum(calls))::numeric(20,2) as avg_time, + (sum(total_exec_time-blk_read_time-blk_write_time)::numeric/sum(calls))::numeric(20, 2) as avg_cpu_time, + (sum(blk_read_time+blk_write_time)::numeric/sum(calls))::numeric(20, 2) as avg_io_time, + to_char(sum(calls), 'FM999,999,999,990') as calls, + (100*sum(calls)/(select ncalls from totals))::numeric(20, 2) as calls_percent, + to_char(sum(rows), 'FM999,999,999,990') as rows, + (100*sum(rows)/(select total_rows from totals))::numeric(20, 2) as row_percent, + (min(min_exec_time))::numeric(20, 2) as min_time, + (max(max_exec_time))::numeric(20, 2) as max_time, + (avg(stddev_exec_time))::numeric(20, 2) as stddev_time, + 'all' as database, + 'all' as username, + 'other' as query + from _pg_stat_statements + where + not ( + (total_exec_time-blk_read_time-blk_write_time)/(select cpu_time from totals) >= 0.01 + or (blk_read_time+blk_write_time)/(select greatest(io_time, 1) from totals) >= 0.01 + or calls/(select ncalls from totals)>=0.02 or rows/(select total_rows from totals) >= 0.02 + ) +) +, statements_readable as ( + select row_number() over (order by s.time_percent desc) as pos, + to_char(time_percent, 'FM990.0') || '%' as time_percent, + to_char(io_time_percent, 'FM990.0') || '%' as io_time_percent, + to_char(cpu_time_percent, 'FM990.0') || '%' as cpu_time_percent, + to_char(avg_io_time*100/(coalesce(nullif(avg_time, 0), 1)), 'FM990.0') || '%' as avg_io_time_percent, + total_exec_time, avg_time, avg_cpu_time, 
avg_io_time, calls, calls_percent, rows, row_percent, + min_time, max_time, stddev_time, + database, username, query + from statements s + where calls is not null +) +select + e'total time:\t' || total_exec_time || ' (IO: ' || io_time_percent || E'%)\n' + || e'total queries:\t' || total_queries || ' (unique: ' || unique_queries || E')\n' + || 'report for ' || (select case when current_database() = 'postgres' then 'all databases' else current_database() || ' database' end) + || E', version b0.9.6' + || ' @ PostgreSQL ' + || (select setting from pg_settings where name='server_version') || E'\ntracking ' + || (select setting from pg_settings where name='pg_stat_statements.track') || ' ' + || (select setting from pg_settings where name='pg_stat_statements.max') || ' queries, utilities ' + || (select setting from pg_settings where name='pg_stat_statements.track_utility') + || ', logging ' || (select (case when setting = '0' then 'all' when setting = '-1' then 'none' when setting::int > 1000 then (setting::numeric/1000)::numeric(20, 1) || 's+' else setting || 'ms+' end) from pg_settings where name='log_min_duration_statement') + || E' queries\n' + || ( + select coalesce(string_agg('WARNING: database ' || datname || ' must be vacuumed within ' || to_char(2147483647 - age(datfrozenxid), 'FM999,999,999,990') || ' transactions', E'\n' order by age(datfrozenxid) desc) || E'\n', '') + from pg_database where (2147483647 - age(datfrozenxid)) < 200000000 + ) || E'\n' +from totals_readable +union all +( +select + e'=============================================================================================================\n' + || 'pos:' || pos || E'\t total time: ' || total_exec_time || ' (' || time_percent || ', CPU: ' || cpu_time_percent || ', IO: ' || io_time_percent + || E')\t calls: ' || calls || ' (' || calls_percent || E'%)\t avg_time: ' || avg_time || 'ms (IO: ' || avg_io_time_percent || E')\n' + || E'\t\t( min_time: ' || min_time || 'ms, ' || 'max_time: ' || max_time || 
'ms, ' || 'stddev_time: ' || stddev_time || 'ms )' + || E'\n' || 'user: ' || username || E'\t db: ' || database || E'\t rows: ' || rows || ' (' || row_percent || '%)' || E'\t query:\n' + || query || E'\n' +from statements_readable +order by pos +); + diff --git a/16/docker-mamonsu/Dockerfile b/16/docker-mamonsu/Dockerfile new file mode 100644 index 0000000..64dd0ae --- /dev/null +++ b/16/docker-mamonsu/Dockerfile @@ -0,0 +1,74 @@ +# Based on: +# https://hub.docker.com/_/debian +# +FROM debian:bookworm-slim + +LABEL maintainer="Sergey Grinko " + +ENV DEBIAN_RELEASE bookworm +ENV PG_MAJOR 16 +ENV BACKUP_PATH /mnt/pgbak +# version mamonsu +ENV VERSION 3.5.5 + +# explicitly set user/group IDs +RUN set -eux; \ + groupadd -r postgres --gid=999; \ + useradd -r -g postgres --uid=999 --home-dir=/var/lib/postgresql --shell=/bin/bash postgres; \ + mkdir -p /var/lib/postgresql/data; + +COPY ./pg_probackup.py /usr/local/bin + +RUN apt-get update \ + && apt-get install -y --no-install-recommends ca-certificates wget gnupg sendemail dumb-init make dpkg-dev debhelper python3-dev python3-setuptools\ + # ... install psql ... + && echo "deb http://apt.postgresql.org/pub/repos/apt $DEBIAN_RELEASE-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ + && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + && apt-get update \ + && apt-get install -y --no-install-recommends postgresql-client-$PG_MAJOR \ + # ... install mamonsu ... 
+ && mkdir -p /etc/mamonsu \ + && cd /tmp \ + && mkdir -p build_mamonsu \ + && cd /tmp/build_mamonsu \ + && wget --quiet https://github.com/postgrespro/mamonsu/archive/refs/tags/$VERSION.tar.gz \ + && tar xzf /tmp/build_mamonsu/$VERSION.tar.gz --directory /tmp/build_mamonsu/ \ + && cp /usr/local/bin/pg_probackup.py /tmp/build_mamonsu/mamonsu-$VERSION/mamonsu/plugins/system/linux/pg_probackup.py \ + && cd /tmp/build_mamonsu/mamonsu-$VERSION && python3 setup.py build && python3 setup.py install \ + && ln -s /usr/local/bin/mamonsu /usr/bin/mamonsu \ + && mkdir -p /var/log/mamonsu \ + && chown -R postgres:postgres /var/log/mamonsu \ + # подключаем репозитарий архивной утилиты + && echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $DEBIAN_RELEASE main-$DEBIAN_RELEASE" > /etc/apt/sources.list.d/pg_probackup.list \ + && wget -qO - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG-PROBACKUP | tee /etc/apt/trusted.gpg.d/pg_probackup.asc \ + # ... updating ... + && apt-get update \ + && apt-get install -y --no-install-recommends \ + pg-probackup-$PG_MAJOR \ + && mkdir -p $BACKUP_PATH \ + && chown -R postgres:postgres $BACKUP_PATH \ + && chown -R postgres:postgres /etc/mamonsu /var/log/mamonsu \ + # ... cleaning ... + # git* даёт ошибку!!! 
Не менять + && apt-get purge -y git mariadb* make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && rm -rf /etc/mamonsu/* \ + && apt-get purge -y make dpkg-dev debhelper \ + && apt-get -f install \ + && apt-get -y autoremove \ + && apt-get -y clean \ + && apt-get -y autoclean \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /var/cache/apt/* + +COPY ./mamonsu_start.sh /usr/local/bin +COPY ./agent.conf /usr/local/bin/agent.conf.tmpl +COPY ./pg_partition.py /usr/local/bin/pg_partition.py.tmpl +COPY ./pg_jobs_check.py /usr/local/bin/pg_jobs_check.py.tmpl +COPY ./pre.sql /var/lib/postgresql +COPY ./mamonsu_right_add.sql /var/lib/postgresql +COPY ./bootstrap_post.sql /var/lib/postgresql + +RUN chown postgres:postgres /var/lib/postgresql/*.sql \ + && chmod +x /usr/local/bin/*.sh + +USER postgres +ENTRYPOINT [ "dumb-init", "/usr/local/bin/mamonsu_start.sh" ] diff --git a/16/docker-mamonsu/agent.conf b/16/docker-mamonsu/agent.conf new file mode 100644 index 0000000..8b4142b --- /dev/null +++ b/16/docker-mamonsu/agent.conf @@ -0,0 +1,211 @@ +# This is a configuration file for mamonsu +# To get more information about mamonsu, visit https://postgrespro.ru/docs/postgrespro/12/mamonsu + +######### Connection parameters sections ############## + +# specify connection parameters for the Postgres cluster +# in the user, password, and database fields, you must specify the mamonsu_user, mamonsu_password, +# and the mamonsu_database used for bootstrap, respectively. +# if you skipped the bootstrap, specify a superuser credentials and the database to connect to. + +[postgres] +enabled = True +user = mamonsu +password = MAMONSU_PASSWORD +database = mamonsu +host = PGHOST +port = PGPORT +application_name = mamonsu +query_timeout = 60 + +# the address field must point to the running Zabbix server, while the client field must provide the name of +# the Zabbix host. 
You can find the list of hosts available for your account in the Zabbix web +# interface under Configuration > Hosts. +# re_send - True - in case of transmission error, mamonsu repeats sending metrics one by one to look in log metrics with error + +[zabbix] +enabled = True +client = CLIENT_HOSTNAME +address = ZABBIX_SERVER_IP +port = ZABBIX_SERVER_PORT +timeout = 15 +re_send = False + +######### General parameters sections ############ + +# enable or disable collection of system metrics. + +[system] +enabled = True + +# control the queue size of the data to be sent to the Zabbix server + +[sender] +queue = 2048 + +# specify the location of mamonsu and whether it is allowed to access metrics from the command line + +[agent] +enabled = True +host = MAMONSU_AGENTHOST +port = 10052 + +# specify custom plugins to be added for metrics collection + +[plugins] +enabled = True +directory = /etc/mamonsu/plugins + +# enable storing the collected metric data in text files locally. + +[metric_log] +enabled = False +directory = /var/log/mamonsu +max_size_mb = 1024 + +# specify logging settings for mamonsu + +[log] +file = /var/log/mamonsu/mamonsu.log +level = INFO +format = [%(levelname)s] %(asctime)s - %(name)s - %(message)s + +######### Individual Plugin Sections ############ + +# to disable any plugin set the enabled option to False. +# modify collection interval for each plugin in the interval field. +# set customer parameters for some plugins in the individual section. +# below listed all available parameters for each plugin to modify. + +[health] +interval = 60 + +[archivecommand] +interval = 60 + +# Besides standard autovacuum workers count, mamonsu also counts autovacuum utilization. +# But this metric is instantaneous, so recommended to run this plugin frequently +# to get a complete picture of autovacuum utilization. 
+[autovacuum] +interval = 30 + +[bgwriter] +interval = 60 + +[cfs] +force_enable = False +interval = 60 + +[checkpoint] +interval = 300 + +[connections] +interval = 60 + +[databases] +interval = 300 + +[pghealth] +interval = 60 + +[instance] +interval = 60 + +# This plugin allows detecting possible memory leaks while working with PostgreSQL using /proc/pid/status and /proc/pid/statm +# We use RES and SHR difference to calculate approximate volume of private anonymous backend memory. +# If it exceeds private_anon_mem_threshold then that pid will be added to a message. An example is presented below +# statm - 'pid: {pid}, RES {RES} - SHR {SHR} more then {private_anon_mem_threshold}\n' +# Since Linux 4.5 RssAnon, RssFile and RssShmem have been added. +# They allow distinguishing types of memory such as private anonymous, file-backed, and shared anonymous memory. +# We are interested in RssAnon. If its value exceeds private_anon_mem_threshold then that pid will also be added to a message. +# By default this plugin is disabled. To enable this plugin, set "enabled = True" below +# interval - monitoring frequency in seconds. 60 seconds by default +# private_anon_mem_threshold - memory volume threshold after which we need an investigation about memory leak. 1GB by default. +# Possible values MB, GB, TB. For example 1GB +[memoryleakdiagnostic] +enabled = MEMORYLEAKDIAGNOSTIC_ENABLED +interval = 60 +private_anon_mem_threshold = MEMORYLEAKDIAGNOSTIC_THRESHOLD + +[oldest] +interval = 60 + +[pgbuffercache] +interval = INTERVAL_PGBUFFERCACHE +[pglocks] +interval = 60 + +# Get age (in seconds) of the oldest running prepared transaction and number of all prepared transactions for two-phase commit. +# https://www.postgresql.org/docs/current/sql-prepare-transaction.html +# https://www.postgresql.org/docs/12/view-pg-prepared-xacts.html +# max_prepared_transaction_time - age of prepared transaction in seconds. 
+# If pgsql.prepared.oldest exceeds max_prepared_transaction_time the trigger fires. +[preparedtransaction] +interval = 60 + +# Get size of relations defined in this section +# Relations - comma separated list of objects - tables and indexes (database_name.schema.relation) used to calculate relations size. +# Example: +# relations=postgres.pg_catalog.pg_class,postgres.pg_catalog.pg_user +# If the relation is blocked by some process such as vacuum full or create index, the result will be -1 +# By default this plugin is disabled. To enable this plugin, set "enabled = True" below and define a list of relations. +[relationssize] +enabled = False +relations = RELATIONS_RELATIONSSIZE +interval = 300 + +[replication] +interval = 60 + +[statstatements] +interval = 60 + +[waitsampling] +interval = 60 + +[wal] +interval = 60 + +[disksizes] +interval = 60 + +[diskstats] +interval = 60 + +[la] +interval = 60 + +[memory] +interval = 60 + +[net] +interval = 60 + +[openfiles] +interval = 60 + +# Get size of backup catalogs storing all WAL and backup files using pg_probackup +# (https://github.com/postgrespro/pg_probackup) +# Trigger fires if some backup has bad status e.g. (ERROR,CORRUPT,ORPHAN). +[pgprobackup] +enabled = PGPROBACKUP_ENABLED +interval = 300 +backup_dirs = /mnt/pgbak +pg_probackup_path = /usr/bin/pg_probackup-PGPROBACKUP_PG_MAJOR +max_time_run_backup2alert_in_sec = 14400 +max_time_lack_backup2alert_in_sec = 97200 + +[procstat] +interval = 60 + +[systemuptime] +interval = 60 + +[agentapi] +interval = 60 + +[logsender] +interval = 2 + +[zbxsender] +interval = 10 diff --git a/16/docker-mamonsu/bootstrap_post.sql b/16/docker-mamonsu/bootstrap_post.sql new file mode 100644 index 0000000..07558c2 --- /dev/null +++ b/16/docker-mamonsu/bootstrap_post.sql @@ -0,0 +1 @@ +select 'GRANT EXECUTE ON FUNCTION mamonsu.' 
|| oid::regprocedure || ' TO mamonsu;' from pg_proc where pronamespace = 'mamonsu'::regnamespace \gexec diff --git a/16/docker-mamonsu/mamonsu_right_add.sql b/16/docker-mamonsu/mamonsu_right_add.sql new file mode 100644 index 0000000..893b041 --- /dev/null +++ b/16/docker-mamonsu/mamonsu_right_add.sql @@ -0,0 +1,32 @@ +select not pg_is_in_recovery() as is_master \gset +\if :is_master + CREATE EXTENSION IF NOT EXISTS pg_buffercache; + CREATE EXTENSION IF NOT EXISTS pg_stat_statements; + GRANT USAGE ON SCHEMA pg_catalog TO mamonsu; + GRANT SELECT ON TABLE pg_proc TO mamonsu; + select current_database() = 'mamonsu' as is_mamonsu_db \gset + \if :is_mamonsu_db + select '''' || case when current_setting('shared_buffers') like '%GB' + then (replace(current_setting('shared_buffers'), 'GB', '')::int)*1024 + else replace(current_setting('shared_buffers'), 'MB', '')::int + end * 0.0117 || ' MB''' as highpage_mb \gset + ALTER FUNCTION mamonsu.buffer_cache() SET WORK_MEM = :highpage_mb; -- for shared_buffers 16 Гб 200 Мб + GRANT USAGE ON SCHEMA public TO mamonsu; + GRANT EXECUTE ON FUNCTION public.pg_stat_statements(boolean) TO mamonsu; + \endif + -- we give the right to connect for the role of mamonsu + do $$ begin execute 'GRANT CONNECT ON DATABASE "' || current_database() || '" TO mamonsu; '; end $$; + -- + CREATE SCHEMA IF NOT EXISTS pgbouncer; + GRANT CONNECT ON DATABASE mamonsu TO pgbouncer; + GRANT USAGE ON SCHEMA pgbouncer TO pgbouncer; +CREATE OR REPLACE FUNCTION pgbouncer.user_lookup(p_username text, OUT uname text, OUT phash text) RETURNS record + LANGUAGE plpgsql SECURITY DEFINER + AS $$ +BEGIN + SELECT usename, passwd FROM pg_catalog.pg_shadow + WHERE usename = p_username INTO uname, phash; + RETURN; +END; +$$; +\endif diff --git a/16/docker-mamonsu/mamonsu_start.sh b/16/docker-mamonsu/mamonsu_start.sh new file mode 100644 index 0000000..35025a2 --- /dev/null +++ b/16/docker-mamonsu/mamonsu_start.sh @@ -0,0 +1,65 @@ +#!/bin/bash + +set -e + +CONFIG_DIR=/etc/mamonsu 
+ +if [ ! -f ${CONFIG_DIR}/agent.conf ]; then + cp -f /usr/local/bin/agent.conf.tmpl ${CONFIG_DIR}/agent.conf +fi +mkdir -p ${CONFIG_DIR}/plugins +if [ ! -f ${CONFIG_DIR}/plugins/pg_partition.py ]; then + cp -f /usr/local/bin/pg_partition.py.tmpl ${CONFIG_DIR}/plugins/pg_partition.py +fi +if [ ! -f ${CONFIG_DIR}/plugins/pg_jobs_check.py ]; then + cp -f /usr/local/bin/pg_jobs_check.py.tmpl ${CONFIG_DIR}/plugins/pg_jobs_check.py +fi +if [ ! -f ${CONFIG_DIR}/plugins/__init__.py ]; then + touch ${CONFIG_DIR}/plugins/__init__.py +fi + +# ... correct mamonsu conf file ... +sed -i "s/host = PGHOST/host = ${PGHOST:-127.0.0.1}/g" ${CONFIG_DIR}/agent.conf \ + && sed -i "s/password = MAMONSU_PASSWORD/password = ${MAMONSU_PASSWORD:-None}/g" ${CONFIG_DIR}/agent.conf \ + && sed -i "s/port = PGPORT/port = ${PGPORT:-5432}/g" ${CONFIG_DIR}/agent.conf \ + && sed -i "s/client = CLIENT_HOSTNAME/client = $CLIENT_HOSTNAME/g" ${CONFIG_DIR}/agent.conf \ + && sed -i "s/address = ZABBIX_SERVER_IP/address = $ZABBIX_SERVER_IP/g" ${CONFIG_DIR}/agent.conf \ + && sed -i "s/port = ZABBIX_SERVER_PORT/port = ${ZABBIX_SERVER_PORT:-10051}/g" ${CONFIG_DIR}/agent.conf \ + && sed -i "s/interval = INTERVAL_PGBUFFERCACHE/interval = ${INTERVAL_PGBUFFERCACHE:-1200}/g" ${CONFIG_DIR}/agent.conf \ + && sed -i "s/enabled = PGPROBACKUP_ENABLED/enabled = ${PGPROBACKUP_ENABLED:-False}/g" ${CONFIG_DIR}/agent.conf \ + && sed -i "s/pg_probackup_path = \/usr\/bin\/pg_probackup-PGPROBACKUP_PG_MAJOR/pg_probackup_path = \/usr\/bin\/pg_probackup-${PG_MAJOR}/g" ${CONFIG_DIR}/agent.conf \ + && sed -i "s/host = MAMONSU_AGENTHOST/host = ${MAMONSU_AGENTHOST:-127.0.0.1}/g" ${CONFIG_DIR}/agent.conf \ + && sed -i "s/relations = RELATIONS_RELATIONSSIZE/relations = ${RELATIONS_RELATIONSSIZE:-postgres.pg_catalog.pg_class,postgres.pg_catalog.pg_user}/g" ${CONFIG_DIR}/agent.conf \ + && sed -i "s/enabled = MEMORYLEAKDIAGNOSTIC_ENABLED/enabled = ${MEMORYLEAKDIAGNOSTIC_ENABLED:-False}/g" ${CONFIG_DIR}/agent.conf \ + && sed -i 
"s/private_anon_mem_threshold = MEMORYLEAKDIAGNOSTIC_THRESHOLD/private_anon_mem_threshold = ${MEMORYLEAKDIAGNOSTIC_THRESHOLD:-4GB}/g" ${CONFIG_DIR}/agent.conf + +# Create the 'mamonsu' DB and get all list DBs +DB_ALL=`psql -qAXt -f /var/lib/postgresql/pre.sql -v MAMONSU_PASSWORD="$MAMONSU_PASSWORD"` + +# Name of the table version +TABLE_CHECK="${VERSION/'.'/'_'}" +# Dual pass to remove the second point +TABLE_CHECK="timestamp_master_${TABLE_CHECK/'.'/'_'}" +if psql -qtAX -c "select case when not pg_is_in_recovery() and not exists(select * from pg_class where relname = '$TABLE_CHECK') then 1 else 0 end as master" mamonsu | grep '1' ; then + echo "bootstrap DB mamonsu ..." + if [ "$PGPASSWORD" = "" ]; then + /usr/bin/mamonsu bootstrap -M mamonsu -U postgres -x -d mamonsu --port=${PGPORT:-5432} --host=${PGHOST:-127.0.0.1}; + else + /usr/bin/mamonsu bootstrap -M mamonsu -U postgres -x -d mamonsu --port=${PGPORT:-5432} --password=$PGPASSWORD --host=${PGHOST:-127.0.0.1}; + fi + psql -qtAX --dbname="mamonsu" -f /var/lib/postgresql/bootstrap_post.sql +fi + +# setup DBs for monitoring at mamonsu +for DB in $DB_ALL ; do + echo "Updating '$DB'" + psql -qtAX --dbname="$DB" -f /var/lib/postgresql/mamonsu_right_add.sql +done + +# generate templates... +cd ${CONFIG_DIR} +/usr/bin/mamonsu export template template.xml --add-plugins ${CONFIG_DIR}/plugins + +# start services... +cd / +exec /usr/bin/mamonsu -a ${CONFIG_DIR}/plugins -c ${CONFIG_DIR}/agent.conf diff --git a/16/docker-mamonsu/metrics.ru.md b/16/docker-mamonsu/metrics.ru.md new file mode 100644 index 0000000..6dd04b9 --- /dev/null +++ b/16/docker-mamonsu/metrics.ru.md @@ -0,0 +1,82 @@ +# Описания плагинов + +## pg_probackup.py +Предназначен для контроля за состоянием каталогов бэкапов создаваемых утилитой [pg_probackup](https://postgrespro.ru/docs/postgrespro/current/app-pgprobackup). +Плагин адаптирован для контроля нескольких инстансов в одном каталоге. Имя инстанса указывается в ключе метрики как подкаталог. 
+ +### Настройки в секции [pgprobackup] + +| Наименование | Ключ | Описание | +| --------------------------------- | ------------------------- | ------------------------------------------------------------------ | +| enabled | False | По умолчанию плагин отключен. Укажите True для включения | +| interval | 900 | Как часто опрашивать состояние каталогов. Указано в секундах | +| backup_dirs | /backup_dir1,/backup_dir2 | Список каталогов бэкапов утилиты pg_probackup | +| pg_probackup_path | /usr/bin/pg_probackup-13 | Полный путь к утилите создания бэкапов pg_probackup | +| max_time_run_backup2alert_in_sec | 21600 | Время срабатывания алерта "Backup runs too long on..." в секундах. | +| max_time_lack_backup2alert_in_sec | 100800 | Время срабатывания алерта "Long time no backups on..." в секундах. | + + +### Текущие метрики в Discovery правиле: + +| Наименование | Ключ | Хранить | Описание | +| ---------------------------------------------------------- | ------------------------------------------------ | ------- | -------------------------------------------------------- | +| Pg_probackup dir {#BACKUPDIR}: size | pg_probackup.dir.size[{#BACKUPDIR}] | 31d | Общий размер каталога: /backups + /wal | +| Pg_probackup dir {#BACKUPDIR}/backups: size | pg_probackup.dir.size[{#BACKUPDIR}/backups] | 31d | Размер подкаталога /backups | +| Pg_probackup dir {#BACKUPDIR}/wal: size | pg_probackup.dir.size[{#BACKUPDIR}/wal] | 31d | Размер подкаталога /wal | +| Pg_probackup dir {#BACKUPDIR}: duration full backup | pg_probackup.dir.duration_full[{#BACKUPDIR}] | 31d | Длительность в секундах создания полного бэкапа | +| Pg_probackup dir {#BACKUPDIR}: duration incremental backup | pg_probackup.dir.duration_inc[{#BACKUPDIR}] | 31d | Длительность в секундах создания инкрементального бэкапа | +| Pg_probackup dir {#BACKUPDIR}: start time backup | pg_probackup.dir.start_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) старта создания бэкапа | +| Pg_probackup dir {#BACKUPDIR}: end time backup | 
pg_probackup.dir.end_time_backup[{#BACKUPDIR}] | | Время (UNIXTIME) завершения создания бэкапа | +| Pg_probackup dir {#BACKUPDIR}: mode | pg_probackup.dir.mode_backup[{#BACKUPDIR}] | | Текущий режим бэкапа | +| Pg_probackup dir {#BACKUPDIR}: status | pg_probackup.dir.status_backup[{#BACKUPDIR}] | | Текущий статус бэкапа | +| Pg_probackup dir {#BACKUPDIR}: error | pg_probackup.dir.error[{#BACKUPDIR}] | | Признак ошибочного состояния или "ok" если всё хорошо | + + +### Текущие алерты в Discovery правиле: +Созданы следующие алерты, позволящие контролировать состояние архивных каталогов: + +* Алерт срабатывает если создание бэкапа выполняется больше, чем указано в настроечном параметре `max_time_run_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 21600 (6 часов). Контролируется текущее состояние в котором находится процесс создания бэкапной копии. + +| Категория | Детали | +| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Важность: | Warning | +| Наименование: | Backup runs too long on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} (RUNNING) | +| Выражение: | {PostgresPro-Linux:pg_probackup.dir.status_backup[{#BACKUPDIR}].last()}="RUNNING" and ( {PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].now()}-{PostgresPro-Linux:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].last()}) > max_time_run_backup2alert_in_sec | + +* Алерт срабатывает если не выполняется создание нового бэкапа дольше, чем указано в настроечном параметре `max_time_lack_backup2alert_in_sec`. Время задаётся в секундах и значение по умолчанию = 100800 (28 часов). Контролируется, что очередной бэкап (тип бэкапа любой) будет создан не позже, чем указано в параметре. 
+ +| Категория | Детали | +| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Важность: | Warning | +| Наименование: | Long time no backups on {HOSTNAME} in pg_probackup dir {#BACKUPDIR} | +| Выражение: | ( {PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].now()} -{PostgresPro-Linux:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].last()}) > max_time_lack_backup2alert_in_sec | + +* Алерт срабатывает если при создании бэкапа произошла ошибка - 'ERROR', 'CORRUPT', 'ORPHAN'. Контролирует состояние любой архивной копии, не только последней. Активен всё время пока есть любая архивная копия с ошибочным состоянием. + +| Категория | Детали | +| ------------- | ----------------------------------------------------------------------------------- | +| Важность: | Average | +| Наименование: | Error in pg_probackup dir {#BACKUPDIR} (hostname={HOSTNAME} value={ITEM.LASTVALUE}) | +| Выражение: | {PostgresPro-Linux:pg_probackup.dir.error[{#BACKUPDIR}].str(ok)}<>1 | + + +### Текущие графики в Discovery правиле: + +1. Pg_probackup: backup dir: {#BACKUPDIR} size + +Показывает 3 метрики с информацией о размерах каталогов с архивными копиями: + +| Метрика | Сторона графика | Описание | +| ------------------------------------------- | --------------- | -------------------------------------- | +| pg_probackup.dir.size[{#BACKUPDIR}] | (Left Side) | Общий размер каталогов /backups + /wal | +| pg_probackup.dir.size[{#BACKUPDIR}/backups] | (Left Side) | размер подкаталога /backups | +| pg_probackup.dir.size[{#BACKUPDIR}/wal] | (Right Side) | размер подкаталога /wal | + +2. 
Pg_probackup: backup dir: {#BACKUPDIR} duration + +Показывает 2 метрики с длительностью создания архивных копий: + +| Метрика | Сторона графика | Описание | +| -------------------------------------------- | --------------- | -------------------------------------------------------- | +| pg_probackup.dir.duration_full[{#BACKUPDIR}] | (Left Side) | Длительность в секундах создания полного бэкапа | +| pg_probackup.dir.duration_inc[{#BACKUPDIR}] | (Right Side) | Длительность в секундах создания инкрементального бэкапа | diff --git a/16/docker-mamonsu/pg_jobs_check.py b/16/docker-mamonsu/pg_jobs_check.py new file mode 100644 index 0000000..c223603 --- /dev/null +++ b/16/docker-mamonsu/pg_jobs_check.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- + +from mamonsu.plugins.pgsql.plugin import PgsqlPlugin as Plugin +from mamonsu.plugins.pgsql.pool import Pooler + +class PgJobsCheck(Plugin): + Interval = 60 + + DEFAULT_CONFIG = { + "interval_check": "1 day" # На какой промежуток времени в прошлом искать ошибки + } + + # получаем список всех БД + query_agent_discovery = "select datname from pg_catalog.pg_database where datistemplate = false" + # контролируем ошибки для конкретной БД + query = "select count(*) from cron.get_job_run_details('{1}','{0}'::interval) where status = 'failed' and not pg_is_in_recovery()" + query_table_exists = "select 1 from pg_class where relname='job_run_details' and relnamespace=(select oid from pg_namespace where nspname='cron')" + + AgentPluginType = 'pg' + key_db = 'pgsql.jobs.error' + key_db_discovery = key_db+'{0}' + + def run(self, zbx): + dbs = [] + test_table = 0 + for row in Pooler.query(self.query_table_exists, 'postgres'): + test_table = row[0] + if test_table > 0 : + for info_dbs in Pooler.query(self.query_agent_discovery): + dbs.append({"{#DATABASE}": info_dbs[0]}) + # проверяем наличие ошибок в каждой БД + err_count = 0 # пока ошибок нет + # self.log.info('jobs[sql]='+ self.query.format(self.plugin_config("interval_check"), info_dbs[0])) 
+ for info_rows in Pooler.query(self.query.format(self.plugin_config("interval_check"), info_dbs[0]), 'postgres'): + err_count = int(info_rows[0]) # есть ошибки, фиксируем + # self.log.info('info_rows[{0}]={1} '.format(info_dbs[0], info_rows)) + zbx.send(self.key_db+"[{0}]".format(info_dbs[0]), err_count) + zbx.send(self.key_db+'[]', zbx.json({'data': dbs})) + + def discovery_rules(self, template, dashboard=False): + rule = { + "name": "PostgreSQL JOBs error Discovery", + "key": self.key_db_discovery.format("[{0}]".format(self.Macros[self.Type])), + + } + if Plugin.old_zabbix: + conditions = [] + rule["filter"] = "{#DATABASE}:.*" + else: + conditions = [{ + "condition": [ + {"macro": "{#DATABASE}", + "value": ".*", + "operator": 8, + "formulaid": "A"} + ] + }] + items = [ + {"key": self.right_type(self.key_db_discovery, var_discovery="{#DATABASE},"), + "name": "PostgreSQL JOBs in {#DATABASE}: error count", + 'units': Plugin.UNITS.none, + 'value_type': Plugin.VALUE_TYPE.numeric_unsigned, + 'delay': self.Interval} + ] + triggers = [{ + 'name': 'PostgreSQL: In Database {#DATABASE} on {HOSTNAME} JOBs error (value={ITEM.LASTVALUE})', + 'expression': '{#TEMPLATE:'+self.right_type(self.key_db_discovery, var_discovery="{#DATABASE},")+'.last()}>0' + } + ] + return template.discovery_rule(rule=rule, conditions=conditions, items=items, triggers=triggers) + + def keys_and_queries(self, template_zabbix): + result = ['{0},$2 $1 -c "{1}"'.format(self.key_db_discovery.format("[*]"), self.query_agent_discovery), + ] + return template_zabbix.key_and_query(result) diff --git a/16/docker-mamonsu/pg_partition.py b/16/docker-mamonsu/pg_partition.py new file mode 100644 index 0000000..e8fc07d --- /dev/null +++ b/16/docker-mamonsu/pg_partition.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- + +from mamonsu.plugins.pgsql.plugin import PgsqlPlugin as Plugin +from mamonsu.plugins.pgsql.pool import Pooler + +class PgPartitionDefRows(Plugin): + Interval = 60*20 + query_agent_discovery = """WITH 
RECURSIVE inheritance_tree AS ( + SELECT c_1.oid AS table_oid, + n.nspname AS table_schema, + c_1.relname AS table_name, + NULL::name AS table_parent_schema, + NULL::name AS table_parent_name, + c_1.relispartition AS is_partition + FROM pg_class c_1 + JOIN pg_namespace n ON n.oid = c_1.relnamespace + WHERE c_1.relkind = 'p'::"char" AND c_1.relispartition = false + UNION ALL + SELECT inh.inhrelid AS table_oid, + n.nspname AS table_schema, + c_1.relname AS table_name, + nn.nspname AS table_parent_schema, + cc.relname AS table_parent_name, + c_1.relispartition AS is_partition + FROM inheritance_tree it_1 + JOIN pg_inherits inh ON inh.inhparent = it_1.table_oid + JOIN pg_class c_1 ON inh.inhrelid = c_1.oid + JOIN pg_namespace n ON n.oid = c_1.relnamespace + JOIN pg_class cc ON it_1.table_oid = cc.oid + JOIN pg_namespace nn ON nn.oid = cc.relnamespace + ) +SELECT json_build_object ('data',json_agg(json_build_object('{#TABLE_PART}',it.table_parent_schema || '.' || it.table_parent_name))) +FROM inheritance_tree it +JOIN pg_class c ON c.oid = it.table_oid +LEFT JOIN pg_partitioned_table p ON p.partrelid = it.table_oid +WHERE pg_get_expr(c.relpartbound, c.oid, true) = 'DEFAULT'; +""" + # ищем все DEFAULT секции в которых есть данные + query = """WITH RECURSIVE inheritance_tree AS ( + SELECT c_1.oid AS table_oid, + n.nspname AS table_schema, + c_1.relname AS table_name, + NULL::name AS table_parent_schema, + NULL::name AS table_parent_name, + c_1.relispartition AS is_partition + FROM pg_class c_1 + JOIN pg_namespace n ON n.oid = c_1.relnamespace + WHERE c_1.relkind = 'p'::"char" AND c_1.relispartition = false + UNION ALL + SELECT inh.inhrelid AS table_oid, + n.nspname AS table_schema, + c_1.relname AS table_name, + nn.nspname AS table_parent_schema, + cc.relname AS table_parent_name, + c_1.relispartition AS is_partition + FROM inheritance_tree it_1 + JOIN pg_inherits inh ON inh.inhparent = it_1.table_oid + JOIN pg_class c_1 ON inh.inhrelid = c_1.oid + JOIN pg_namespace n ON 
n.oid = c_1.relnamespace + JOIN pg_class cc ON it_1.table_oid = cc.oid + JOIN pg_namespace nn ON nn.oid = cc.relnamespace + ) +SELECT case when c.reltuples::bigint < 0 then 0::bigint else c.reltuples::bigint end as reltuples, it.table_parent_schema, it.table_parent_name +FROM inheritance_tree it +JOIN pg_class c ON c.oid = it.table_oid +LEFT JOIN pg_partitioned_table p ON p.partrelid = it.table_oid +WHERE pg_get_expr(c.relpartbound, c.oid, true) = 'DEFAULT' + and not pg_is_in_recovery() +ORDER BY it.table_parent_schema, it.table_parent_name, c.reltuples; +""" + + AgentPluginType = 'pg' + key_rel_part = 'pgsql.partition.def.rows' + key_rel_part_discovery = key_rel_part+'{0}' + + def run(self, zbx): + tables = [] + for info_dbs in Pooler.query("select datname from pg_catalog.pg_database where datistemplate = false and datname not in ('mamonsu','postgres')"): + for info_rows in Pooler.query(self.query, info_dbs[0]): + table_name = '{0}.{1}.{2}'.format(info_dbs[0], info_rows[1], info_rows[2]) + tables.append({'{#TABLE_PART}': table_name}) + zbx.send(self.key_rel_part+'[{0}]'.format(table_name), info_rows[0]) + zbx.send(self.key_rel_part+'[]', zbx.json({'data': tables})) + + def discovery_rules(self, template, dashboard=False): + rule = { + 'name': 'Rows in default partition discovery', + 'key': self.key_rel_part_discovery.format('[{0}]'.format(self.Macros[self.Type])), + 'filter': '{#TABLE_PART}:.*' + } + items = [ + {'key': self.right_type(self.key_rel_part_discovery, var_discovery="{#TABLE_PART},"), + 'name': 'Rows in default partition: {#TABLE_PART}', + 'units': Plugin.UNITS.none, + 'value_type': Plugin.VALUE_TYPE.numeric_unsigned, + 'delay': self.Interval}, + ] + conditions = [ + { + 'condition': [ + {'macro': '{#TABLE_PART}', + 'value': '.*', + 'formulaid': 'A'} + ] + } + ] + graphs = [ + { + 'name': 'PostgreSQL: Rows in default partition {#TABLE_PART}', + 'items': [ + {'color': 'CC0000', + 'key': self.right_type(self.key_rel_part_discovery, 
var_discovery="{#TABLE_PART},")} + ] + } + ] + triggers = [{ + 'name': 'PostgreSQL: In the default partition {#TABLE_PART} there are rows on {HOSTNAME} (value={ITEM.LASTVALUE})', + 'expression': '{#TEMPLATE:'+self.right_type(self.key_rel_part_discovery, var_discovery="{#TABLE_PART},")+'.last()}>0' + } + ] + return template.discovery_rule(rule=rule, conditions=conditions, items=items, graphs=graphs, triggers=triggers) + + def keys_and_queries(self, template_zabbix): + result = ['{0},$2 $1 -c "{1}"'.format(self.key_rel_part_discovery.format("[*]"), self.query_agent_discovery), + ] + return template_zabbix.key_and_query(result) diff --git a/16/docker-mamonsu/pg_probackup.py b/16/docker-mamonsu/pg_probackup.py new file mode 100644 index 0000000..ae7e42e --- /dev/null +++ b/16/docker-mamonsu/pg_probackup.py @@ -0,0 +1,326 @@ +from mamonsu.plugins.pgsql.pool import Pooler +from mamonsu.plugins.system.plugin import SystemPlugin as Plugin +from mamonsu.lib.plugin import PluginDisableException +import json +import os +import subprocess +from datetime import datetime + +class PgProbackup(Plugin): + os_walk_error = None + block_size = 4096 + Interval = 15 * 60 + key_main = "pg_probackup.discovery{0}" + key_dir_size = "pg_probackup.dir.size{0}" + key_dir_error = "pg_probackup.dir.error{0}" + key_dir_duration_full = "pg_probackup.dir.duration_full{0}" + key_dir_duration_inc = "pg_probackup.dir.duration_inc{0}" + key_dir_endtime_backup = "pg_probackup.dir.end_time_backup{0}" + key_dir_starttime_backup = "pg_probackup.dir.start_time_backup{0}" + key_dir_status_backup = "pg_probackup.dir.status_backup{0}" + key_dir_mode_backup = "pg_probackup.dir.mode_backup{0}" + AgentPluginType = "pg" + Type = "mamonsu" + + DEFAULT_CONFIG = { + "max_time_run_backup2alert_in_sec": str(21600), # The maximum time of running time of backup to Alert in seconds (6 hours) + "max_time_lack_backup2alert_in_sec": str(100800), # The maximum time of lack of backup to Alert (28 hours) + } + + def 
set_os_walk_error(self, e): + self.os_walk_error = e + + def dir_size(self, path): + self.os_walk_error = None + tree = os.walk(path, onerror=self.set_os_walk_error) + total_size = 0 + for dirpath, dirnames, filenames in tree: + for file in filenames: + try: + size = os.path.getsize(os.path.join(dirpath, file)) + if 0 < size < self.block_size: + size = round(size / self.block_size) * self.block_size + self.block_size + except FileNotFoundError as e: + self.log.debug(str(e)) + size = 0 + total_size += size + try: + size = os.path.getsize(dirpath) + except FileNotFoundError as e: + self.log.debug(str(e)) + size = 0 + total_size += size + return total_size + + def run(self, zbx): + config_pg_probackup_path = self.plugin_config("pg_probackup_path") + + if config_pg_probackup_path is None or config_pg_probackup_path == "": + self.disable() + raise PluginDisableException( + """Disable plugin and exit, because the parameter "pg_probackup_path" in section [pgprobackup] is not + set. Set this parameter if needed and restart.""") + config_backup_dirs = self._plugin_config.get("backup_dirs", None) + + if config_backup_dirs is None or config_backup_dirs == "": + self.disable() + raise PluginDisableException( + """Disable plugin and exit, because the parameter "backup_dirs" in section [pgprobackup] is not set. 
+ Set this parameter if needed and restart.""") + + fmt_data = "%Y-%m-%d %H:%M:%S+03" + backup_dirs = config_backup_dirs.split(",") + dirs = [] + test_recovery = False + for row in Pooler.query('select not pg_is_in_recovery() as test_recovery', 'postgres'): + test_recovery = row[0] + for _dir_top in backup_dirs: + + # Search for backups with bad status is done by running + # "pg_probackup show -B backup_dir" command + command = [config_pg_probackup_path, "show", "-B", _dir_top, "--format=json"] + process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + return_code = process.returncode + if return_code != 0: + self.log.error( + "The command: {command} return code {return_code}. Error: {error}".format(command=command, + return_code=return_code, + error=stderr)) + continue + try: + result = json.loads(stdout.decode("utf-8")) + except Exception as e: + self.log.error("Error in convert data: {stdout} \n {e}".format(stdout=stdout, e=e)) + continue + + no_error= True + + for instance in result: + # We consider the sizes of each instance + instance_name = instance["instance"] + _dir = _dir_top + "/" + instance_name + dirs.append({"{#BACKUPDIR}": _dir}) + + # sud-directory backups + dir_size_backups = self.dir_size(_dir_top + "/backups/" + instance_name) + if self.os_walk_error: + self.log.error( + "Error in count size pg_probackup dir: {backup_catalog}. Error: {error}".format( + backup_catalog=(_dir_top + "/backups/" + instance_name), error=str(self.os_walk_error))) + else: + # We consider the size of the predefined directories - backups + zbx.send(self.key_dir_size.format("[" + _dir + "/backups]"), dir_size_backups) + + # sud-directory wal + dir_size_wal = self.dir_size(_dir_top + "/wal/" + instance_name) + if self.os_walk_error: + self.log.error( + "Error in count size pg_probackup dir: {backup_catalog}. 
Error: {error}".format( + backup_catalog=(_dir_top + "/wal/" + instance_name), error=str(self.os_walk_error))) + else: + # We consider the size of the predefined directories - wal + zbx.send(self.key_dir_size.format("[" + _dir + "/wal]"), dir_size_wal) + + # We consider the size of the predefined directories - backups and wal + zbx.send(self.key_dir_size.format("[" + _dir + "]"), dir_size_backups+dir_size_wal) + + full_send = 0 + for idx, backup in enumerate(instance.get("backups", [])): + status = backup["status"] + mode = backup["backup-mode"] + if idx == 0: + # Status of the last backup + zbx.send(self.key_dir_status_backup.format("[" + _dir + "]"), status) + # Backup Creation Mode Full, Page, Delta and Ptrack of the last backup + zbx.send(self.key_dir_mode_backup.format("[" + _dir + "]"), mode) + if test_recovery and status in ["ERROR", "CORRUPT", "ORPHAN"]: + error = ("Backup with id: {backup_id} in instance: {instance_name} in pg_probackup dir: " + + "{backup_catalog} has status: {status}.").format(backup_id=backup["id"], + instance_name=instance["instance"], + status=status, backup_catalog=_dir) + self.log.info(error) + no_error = False + zbx.send(self.key_dir_error.format("[" + _dir + "]"), error) + if idx == 0: + # the start time of the last backup at unixtime + start = datetime.strptime(backup["start-time"], fmt_data) + zbx.send(self.key_dir_starttime_backup.format("[" + _dir + "]"), start.timestamp()) + # check end-time and calculate duration + if "end-time" in backup: + end = datetime.strptime(backup["end-time"], fmt_data) + delta = (end - start).total_seconds() + # the end time of the last backup at unixtime + zbx.send(self.key_dir_endtime_backup.format("[" + _dir + "]"), end.timestamp()) + # duration full or incremental of the last backup + if backup["backup-mode"] == "FULL": + zbx.send(self.key_dir_duration_full.format("[" + _dir + "]"), delta) + full_send = 1 + else: + zbx.send(self.key_dir_duration_inc.format("[" + _dir + "]"), delta) + if 
full_send == 0 and "end-time" in backup and backup["backup-mode"] == "FULL": + start = datetime.strptime(backup["start-time"], fmt_data) + end = datetime.strptime(backup["end-time"], fmt_data) + delta = (end - start).total_seconds() + zbx.send(self.key_dir_duration_full.format("[" + _dir + "]"), delta) + full_send = 1 + + if no_error: + zbx.send(self.key_dir_error.format("[" + _dir + "]"), "ok") + + zbx.send(self.key_main.format("[]"), zbx.json({"data": dirs})) + del dirs + + def discovery_rules(self, template, dashboard=False): + rule = { + "name": "pg_probackup discovery", + "key": self.key_main.format("[{0}]".format(self.Macros[self.Type])), + } + if Plugin.old_zabbix: + conditions = [] + rule["filter"] = "{#BACKUPDIR}:.*" + else: + conditions = [ + { + "condition": [ + {"macro": "{#BACKUPDIR}", + "value": ".*", + "operator": 8, + "formulaid": "A"} + ] + } + ] + items = [ + {"key": self.right_type(self.key_dir_size, var_discovery="{#BACKUPDIR},"), + "name": "pg_probackup dir {#BACKUPDIR}: size", + "units": Plugin.UNITS.bytes, + "value_type": Plugin.VALUE_TYPE.numeric_unsigned, + "history": "31", + "delay": self.plugin_config("interval"), + "description": "Size of the entire catalog with backups"}, + {"key": self.right_type(self.key_dir_size, var_discovery="{#BACKUPDIR}/backups,"), + "name": "pg_probackup dir {#BACKUPDIR}/backups: size", + "units": Plugin.UNITS.bytes, + "value_type": Plugin.VALUE_TYPE.numeric_unsigned, + "history": "31", + "delay": self.plugin_config("interval"), + "description": "The size of the entire subdirectory /backups"}, + {"key": self.right_type(self.key_dir_size, var_discovery="{#BACKUPDIR}/wal,"), + "name": "pg_probackup dir {#BACKUPDIR}/wal: size", + "units": Plugin.UNITS.bytes, + "value_type": Plugin.VALUE_TYPE.numeric_unsigned, + "history": "31", + "delay": self.plugin_config("interval"), + "description": "The size of the entire subdirectory /wal"}, + {"key": self.right_type(self.key_dir_error, var_discovery="{#BACKUPDIR},"), + 
"name": "pg_probackup dir {#BACKUPDIR}: error", + "value_type": Plugin.VALUE_TYPE.text, + "delay": self.plugin_config("interval"), + "description": "Sign of the erroneous completion of the backup: ERROR, CORRUPT, ORPHAN"}, + {"key": self.right_type(self.key_dir_duration_full, var_discovery="{#BACKUPDIR},"), + "name": "pg_probackup dir {#BACKUPDIR}: duration full backup", + "units": Plugin.UNITS.s, + "value_type": Plugin.VALUE_TYPE.numeric_unsigned, + "history": "31", + "delay": self.plugin_config("interval"), + "description": "The duration of the last full backup"}, + {"key": self.right_type(self.key_dir_duration_inc, var_discovery="{#BACKUPDIR},"), + "name": "pg_probackup dir {#BACKUPDIR}: duration incremental backup", + "units": Plugin.UNITS.s, + "value_type": Plugin.VALUE_TYPE.numeric_unsigned, + "history": "31", + "delay": self.plugin_config("interval"), + "description": "The duration of the last incremental backup"}, + {"key": self.right_type(self.key_dir_endtime_backup, var_discovery="{#BACKUPDIR},"), + "name": "pg_probackup dir {#BACKUPDIR}: end time backup", + "units": Plugin.UNITS.unixtime, + "value_type": Plugin.VALUE_TYPE.numeric_unsigned, + "delay": self.plugin_config("interval"), + "description": "The end time of the last any backup"}, + {"key": self.right_type(self.key_dir_starttime_backup, var_discovery="{#BACKUPDIR},"), + "name": "pg_probackup dir {#BACKUPDIR}: start time backup", + "units": Plugin.UNITS.unixtime, + "value_type": Plugin.VALUE_TYPE.numeric_unsigned, + "delay": self.plugin_config("interval"), + "description": "The start time of the last any backup"}, + {"key": self.right_type(self.key_dir_status_backup, var_discovery="{#BACKUPDIR},"), + "name": "pg_probackup dir {#BACKUPDIR}: status", + "value_type": Plugin.VALUE_TYPE.text, + "delay": self.plugin_config("interval"), + "description": "Sign of the status completion of the last backup:\n\n" + "OK — the backup is complete and valid.\n" + "DONE — the backup is complete, but was not 
validated.\n" + "RUNNING — the backup is in progress.\n" + "MERGING — the backup is being merged.\n" + "MERGED — the backup data files were successfully merged, but its metadata is in the process of being updated. Only full backups can have this status.\n" + "DELETING — the backup files are being deleted.\n" + "CORRUPT — some of the backup files are corrupt.\n" + "ERROR — the backup was aborted because of an unexpected error.\n" + "ORPHAN — the backup is invalid because one of its parent backups is corrupt or missing.\n\n" + "https://postgrespro.ru/docs/postgrespro/current/app-pgprobackup" + }, + {"key": self.right_type(self.key_dir_mode_backup, var_discovery="{#BACKUPDIR},"), + "name": "pg_probackup dir {#BACKUPDIR}: mode", + "value_type": Plugin.VALUE_TYPE.text, + "delay": self.plugin_config("interval"), + "description": "Backup Creation Mode:\n\n" + "FULL — creates a full backup that contains all the data files of the cluster to be restored.\n" + "DELTA — reads all data files in the data directory and creates an incremental backup for pages that have changed since the previous backup.\n" + "PAGE — creates an incremental backup based on the WAL files that have been generated since the previous full or incremental backup was taken. 
Only changed blocks are read from data files.\n" + "PTRACK — creates an incremental backup tracking page changes on the fly.\n\n" + "https://postgrespro.ru/docs/postgrespro/current/app-pgprobackup" + }, + ] + graphs = [ + { + "name": "pg_probackup: backup dir: {#BACKUPDIR} duration", + "type": 0, + "items": [ + {"color": "00897B", + "drawtype": 2, + "key": self.right_type(self.key_dir_duration_full, var_discovery="{#BACKUPDIR},")}, + {"color": "66BB6A", + "drawtype": 2, + "key": self.right_type(self.key_dir_duration_inc, var_discovery="{#BACKUPDIR},"), + "yaxisside": 1} + ] + }, + { + "name": "pg_probackup: backup dir: {#BACKUPDIR} size", + "type": 0, + "items": [ + {"color": "C8E6C9", + "drawtype": 1, + "key": self.right_type(self.key_dir_size, var_discovery="{#BACKUPDIR},")}, + {"color": "00897B", + "drawtype": 2, + "key": self.right_type(self.key_dir_size, var_discovery="{#BACKUPDIR}/backups,")}, + {"color": "66BB6A", + "drawtype": 2, + "key": self.right_type(self.key_dir_size, var_discovery="{#BACKUPDIR}/wal,"), + "yaxisside": 1} + ] + }, + ] + triggers = [ + {"name": "pg_probackup: error in dir {#BACKUPDIR} (hostname={HOSTNAME} value={ITEM.LASTVALUE})", + "expression": "{#TEMPLATE:pg_probackup.dir.error[{#BACKUPDIR}].str(ok)}<>1", + "priority": 3, + "description": "Backup status: CORRUPT / ERROR / ORPHAN"}, + {"name": "pg_probackup: long time no backups on {HOSTNAME} in dir {#BACKUPDIR}", + "expression": "({#TEMPLATE:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].now()}-{#TEMPLATE:pg_probackup.dir.end_time_backup[{#BACKUPDIR}].last()})>" + + self.plugin_config("max_time_lack_backup2alert_in_sec"), + "priority": 2, + "description": "From the moment of completion of the backup passed more than " + + str(int(int(self.plugin_config("max_time_lack_backup2alert_in_sec"))/3600)) + " hours (" + + self.plugin_config("max_time_lack_backup2alert_in_sec") + " seconds)"}, + {"name": "pg_probackup: backup runs too long on {HOSTNAME} in dir {#BACKUPDIR} (RUNNING)", + 
"expression": '{#TEMPLATE:pg_probackup.dir.status_backup[{#BACKUPDIR}].last()}="RUNNING"' + " and ({#TEMPLATE:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].now()}-{#TEMPLATE:pg_probackup.dir.start_time_backup[{#BACKUPDIR}].last()})>" + + self.plugin_config("max_time_run_backup2alert_in_sec"), + "priority": 2, + "description": "From the moment of start of the backup passed more than " + + str(int(int(self.plugin_config("max_time_run_backup2alert_in_sec"))/3600)) + " hours (" + + self.plugin_config("max_time_run_backup2alert_in_sec") + " seconds)"}, + ] + return template.discovery_rule(rule=rule, conditions=conditions, items=items, graphs=graphs, triggers=triggers) diff --git a/16/docker-mamonsu/pre.sql b/16/docker-mamonsu/pre.sql new file mode 100644 index 0000000..eca9626 --- /dev/null +++ b/16/docker-mamonsu/pre.sql @@ -0,0 +1,34 @@ +SET default_transaction_read_only = off; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; + +select not pg_is_in_recovery() as is_master \gset +\if :is_master + select not exists(select true FROM pg_catalog.pg_database where datname='mamonsu') as is_db_mamonsu \gset + \if :is_db_mamonsu + CREATE DATABASE mamonsu; + \endif + \c mamonsu + CREATE EXTENSION IF NOT EXISTS pg_buffercache; + CREATE EXTENSION IF NOT EXISTS pg_stat_statements; + -- check role mamonsu ... 
+ select not exists(select * from pg_roles where rolname = 'mamonsu') as is_role_mamonsu \gset + \if :is_role_mamonsu + select :'MAMONSU_PASSWORD' = '' as is_mamonsu_password_exists \gset + \if :is_mamonsu_password_exists + CREATE ROLE mamonsu LOGIN NOSUPERUSER INHERIT NOCREATEDB NOCREATEROLE NOREPLICATION; + \else + CREATE ROLE mamonsu LOGIN PASSWORD :'MAMONSU_PASSWORD' NOSUPERUSER INHERIT NOCREATEDB NOCREATEROLE NOREPLICATION; + \endif + \else + select :'MAMONSU_PASSWORD' <> '' as is_mamonsu_password_notexists \gset + \if :is_mamonsu_password_notexists + ALTER ROLE mamonsu WITH PASSWORD :'MAMONSU_PASSWORD' ; + \endif + \endif + GRANT USAGE ON SCHEMA pg_catalog TO mamonsu; + GRANT SELECT ON TABLE pg_proc TO mamonsu; +\endif + +-- get list all current DBs +select string_agg(datname,' ') from pg_catalog.pg_database where not datistemplate; diff --git a/16/docker-pgprobackup/Dockerfile b/16/docker-pgprobackup/Dockerfile new file mode 100644 index 0000000..b77d5e5 --- /dev/null +++ b/16/docker-pgprobackup/Dockerfile @@ -0,0 +1,52 @@ +# Based on: +# https://hub.docker.com/_/debian +# +FROM debian:bookworm-slim + +LABEL maintainer="Sergey Grinko " + +ENV DEBIAN_RELEASE bookworm +ENV PG_MAJOR 16 +ENV PGDATA /var/lib/postgresql/data +ENV BACKUP_PATH /mnt/pgbak + +# explicitly set user/group IDs +RUN set -eux; \ + groupadd -r postgres --gid=999; \ + useradd -r -g postgres --uid=999 --home-dir=/var/lib/postgresql --shell=/bin/bash postgres; \ + mkdir -p /var/lib/postgresql/data; \ + chown -R postgres:postgres /var/lib/postgresql + +RUN apt-get update \ + && apt-get install -y --no-install-recommends ca-certificates wget gnupg sendemail jq \ + # подключаем репозитарий сообщества PostgreSQL + && echo "deb http://apt.postgresql.org/pub/repos/apt ${DEBIAN_RELEASE}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ + && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + # подключаем репозитарий архивной утилиты + && echo "deb [arch=amd64] 
https://repo.postgrespro.ru/pg_probackup/deb/ $DEBIAN_RELEASE main-$DEBIAN_RELEASE" > /etc/apt/sources.list.d/pg_probackup.list \ + && wget -qO - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG-PROBACKUP | tee /etc/apt/trusted.gpg.d/pg_probackup.asc \ + # ... updating ... + && apt-get update \ + # ... install pg-probackup and other... + && apt-get install -y --no-install-recommends \ + pg-probackup-$PG_MAJOR \ + postgresql-client-$PG_MAJOR \ + && mkdir -p $BACKUP_PATH \ + && chown -R postgres:postgres $BACKUP_PATH \ + # ... cleaning ... + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get -f install \ + && apt-get -y autoremove \ + && apt-get -y clean \ + && apt-get -y autoclean \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /var/cache/apt/* + +COPY ./backup.sh /usr/local/bin +COPY ./show.sh /usr/local/bin +# copy sql files +COPY ./sql/*.sql /usr/local/bin/ + +RUN chmod +x /usr/local/bin/*.sh + +USER postgres +ENTRYPOINT [ "/usr/local/bin/backup.sh" ] diff --git a/16/docker-pgprobackup/backup.sh b/16/docker-pgprobackup/backup.sh new file mode 100644 index 0000000..5f7cb6a --- /dev/null +++ b/16/docker-pgprobackup/backup.sh @@ -0,0 +1,122 @@ +#!/bin/bash + +# calculate day week +DOW=$(date +%u) +cd $BACKUP_PATH + +if [ "$EMAILTO" = "" ]; then + EMAILTO="DBA-PostgreSQL@company.ru" +fi + +if [ "$EMAIL_SERVER" = "" ]; then + EMAIL_SERVER=mail.company.ru +fi + +if [ "$EMAIL_HOSTNAME" = "" ]; then + EMAIL_HOSTNAME=`hostname` + EMAIL_HOSTNAME="noreplay@${EMAIL_HOSTNAME}.ru" +fi + +if [ "$EMAIL_SEND" = "" ]; then + EMAIL_SEND="yes" +fi + +if [ "$BACKUP_THREADS" = "" ]; then + BACKUP_THREADS=4 +fi + +if [ "$DOW" = "6" ] ; then + # make a full backup once a week (Saturday) + BACKUPMODE=full +else + # make an incremental backup on other days of the week + BACKUPMODE=page +fi +if [ "$BACKUP_MODE" != "" ]; then + # The backup creation mode is given forcibly + BACKUPMODE=$BACKUP_MODE +fi + +if [ 
"$BACKUP_STREAM" = "" ]; then + BACKUP_STREAM="stream" +fi +if [[ "$BACKUP_STREAM" = "yes" || "$BACKUP_STREAM" = "stream" ]]; then + BACKUP_STREAM="--stream" +else + BACKUP_STREAM="" +fi + +if [ "$BACKUP_PATH" = "" ]; then + BACKUP_PATH="/mnt/pgbak" +fi + +if [ "$PGUSER" = "" ]; then + PGUSER=postgres +fi + +COUNT_DIR=`ls -l $BACKUP_PATH | grep "^d" | wc -l` + +if [ "$COUNT_DIR" = "0" ]; then + echo "Init new directory for backup: $BACKUP_PATH" + /usr/bin/pg_probackup-$PG_MAJOR init -B $BACKUP_PATH -D $PGDATA +fi + +if ! [ -d "$BACKUP_PATH/backups/$PG_MAJOR" ]; then + echo "Create new instance for claster: $PG_MAJOR" + /usr/bin/pg_probackup-$PG_MAJOR add-instance -B $BACKUP_PATH --instance=$PG_MAJOR -D $PGDATA + /usr/bin/pg_probackup-$PG_MAJOR set-config -B $BACKUP_PATH --instance=$PG_MAJOR --retention-window=30 --compress-algorithm=zlib --compress-level=6 +fi + +IS_FULL=`/usr/bin/pg_probackup-$PG_MAJOR show --instance=$PG_MAJOR --backup-path=$BACKUP_PATH | grep FULL | grep 'OK\|DONE'` + +if ! [ -f $PGDATA/archive_active.trigger ] ; then + touch $PGDATA/archive_active.trigger +fi + +if [[ "$IS_FULL" = "" || $BACKUPMODE = "full" ]] ; then + echo "The initial backup must be type FULL ..." 
+ /usr/bin/pg_probackup-$PG_MAJOR backup -d postgres --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS +else + if [[ $BACKUPMODE = "merge" ]]; then + # в этом режиме здесь всегда PAGE + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b page $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS + else + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUPMODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS + fi + STATUS=`/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --format=json | jq -c '.[].backups[0].status'` + LAST_STATE=${STATUS//'"'/''} + if [[ "$LAST_STATE" = "CORRUPT" || "$LAST_STATE" = "ERROR" || "$LAST_STATE" = "ORPHAN" ]] ; then + # You need to run a full backup, as an error occurred with incremental + # Perhaps the loss of the segment at Failover ... + /usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS + fi +fi + +if [[ $BACKUPMODE = "merge" ]] ; then + # объединяем старые бэкапы в соответствии с настройками + /usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --merge-expired --no-validate --threads=$BACKUP_THREADS +else + # чистим старые бэкапы в соответствии с настройками + /usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --threads=$BACKUP_THREADS +fi + +# collecting statistics on backups +/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt +/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --archive >> ~postgres/backups.txt + +echo "" >> ~postgres/backups.txt +echo "Место на бэкапном устройстве:" >> ~postgres/backups.txt +df -h $BACKUP_PATH >> ~postgres/backups.txt + +ERRORS_COUNT=`grep -c ERROR ~postgres/backups.txt` 
+EMAIL_SUBJECT="" +if [[ "$ERRORS_COUNT" -ne "0" ]] ; then + EMAIL_SUBJECT="Report backups error" +else + EMAIL_SUBJECT="Report backups" +fi + +# send mail to DBA +if [ "$EMAIL_SEND" = "yes" ]; then + (echo 'List of all cluster backups:
' ; cat ~postgres/backups.txt ; echo '
';) | sendEmail -o tls=no -o message-content-type=html -o message-charset=utf-8 -f "$EMAIL_HOSTNAME" -t $EMAILTO -s $EMAIL_SERVER -u $EMAIL_SUBJECT +fi diff --git a/16/docker-pgprobackup/show.sh b/16/docker-pgprobackup/show.sh new file mode 100644 index 0000000..f97646d --- /dev/null +++ b/16/docker-pgprobackup/show.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# $1 - yes/no (the sign for send to email, yes - default) +# $2 - list of email recipients (separated by a space) + +if [ "$EMAILTO" = "" ]; then + EMAILTO="DBA-PostgreSQL@company.ru" +fi + +if [ "$EMAIL_SERVER" = "" ]; then + EMAIL_SERVER=mail.company.ru +fi + +if [ "$EMAIL_HOSTNAME" = "" ]; then + EMAIL_HOSTNAME=`hostname` + EMAIL_HOSTNAME="noreplay@${EMAIL_HOSTNAME}.ru" +fi + +if [ "$EMAIL_SEND" = "" ]; then + EMAIL_SEND="yes" +fi + +if [ "$BACKUP_PATH" = "" ]; then + BACKUP_PATH="/mnt/pgbak" +fi + +if [ "$1" != "" ]; then + EMAIL_SEND=$1 +fi + +if [ "$2" != "" ]; then + EMAILTO="$2" +fi + +cd $BACKUP_PATH + +# send mail to DBA +/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt +/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --archive >> ~postgres/backups.txt + +echo "" >> ~postgres/backups.txt +echo "Место на бэкапном устройстве:" >> ~postgres/backups.txt +df -h $BACKUP_PATH >> ~postgres/backups.txt + +ERRORS_COUNT=`grep -c ERROR ~postgres/backups.txt` +EMAIL_SUBJECT="" +if [[ "$ERRORS_COUNT" -ne "0" ]] ; then + EMAIL_SUBJECT="Report backups error" +else + EMAIL_SUBJECT="Report backups" +fi + +cat ~postgres/backups.txt +if [ "$EMAIL_SEND" = "yes" ]; then + (echo 'List of all cluster backups:
' ; cat ~postgres/backups.txt ; echo '
';) | sendEmail -o tls=no -o message-content-type=html -o message-charset=utf-8 -f "$EMAIL_HOSTNAME" -t $EMAILTO -s $EMAIL_SERVER -u $EMAIL_SUBJECT +fi diff --git a/16/docker-pgprobackup/sql/first_db.sql b/16/docker-pgprobackup/sql/first_db.sql new file mode 100644 index 0000000..496d4d9 --- /dev/null +++ b/16/docker-pgprobackup/sql/first_db.sql @@ -0,0 +1 @@ +select datname from pg_database where not datistemplate and datname not in ('postgres','mamonsu') limit 1; diff --git a/16/docker-pgprocheckdb/Dockerfile b/16/docker-pgprocheckdb/Dockerfile new file mode 100644 index 0000000..978f3c5 --- /dev/null +++ b/16/docker-pgprocheckdb/Dockerfile @@ -0,0 +1,53 @@ +# Based on: +# https://hub.docker.com/_/debian +# +FROM debian:bookworm-slim + +LABEL maintainer="Sergey Grinko " + +ENV DEBIAN_RELEASE bookworm +ENV PG_MAJOR 16 +ENV PGDATA /var/lib/postgresql/data +ENV BACKUP_PATH /mnt/pgbak + +# explicitly set user/group IDs +RUN set -eux; \ + groupadd -r postgres --gid=999; \ + useradd -r -g postgres --uid=999 --home-dir=/var/lib/postgresql --shell=/bin/bash postgres; \ + mkdir -p /var/lib/postgresql/data; \ + chown -R postgres:postgres /var/lib/postgresql + +RUN apt-get update \ + && apt-get install -y --no-install-recommends ca-certificates wget gnupg sendemail jq \ + # подключаем репозитарий сообщества PostgreSQL + && echo "deb http://apt.postgresql.org/pub/repos/apt ${DEBIAN_RELEASE}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ + && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + # подключаем репозитарий архивной утилиты + && echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $DEBIAN_RELEASE main-$DEBIAN_RELEASE" > /etc/apt/sources.list.d/pg_probackup.list \ + && wget -qO - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG-PROBACKUP | tee /etc/apt/trusted.gpg.d/pg_probackup.asc \ + # ... updating ... + && apt-get update \ + # ... install pg-probackup and other... 
+ && apt-get install -y --no-install-recommends \ + pg-probackup-$PG_MAJOR \ + postgresql-$PG_MAJOR-pg-catcheck \ + postgresql-client-$PG_MAJOR \ + && mkdir -p $BACKUP_PATH \ + && chown -R postgres:postgres $BACKUP_PATH \ + # ... cleaning ... + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get -f install \ + && apt-get -y autoremove \ + && apt-get -y clean \ + && apt-get -y autoclean \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /var/cache/apt/* + +COPY ./show.sh /usr/local/bin +COPY ./check_cluster.sh /usr/local/bin +# copy sql files +COPY ./sql/*.sql /usr/local/bin/ + +RUN chmod +x /usr/local/bin/*.sh + +USER postgres +ENTRYPOINT [ "/usr/local/bin/check_cluster.sh" ] diff --git a/16/docker-pgprocheckdb/check_cluster.sh b/16/docker-pgprocheckdb/check_cluster.sh new file mode 100644 index 0000000..5626f49 --- /dev/null +++ b/16/docker-pgprocheckdb/check_cluster.sh @@ -0,0 +1,169 @@ +#!/bin/bash + +# $1 - 'amcheck' Enable an additional cluster with an Amcheck extension +# $2 - 'heapallindexed' It will be additionally verified that in the index, all the cortices of the heaps that should get into it + +if [ "$EMAILTO" = "" ]; then + EMAILTO="DBA-PostgreSQL@company.ru" +fi + +if [ "$EMAIL_SERVER" = "" ]; then + EMAIL_SERVER=mail.company.ru +fi + +if [ "$EMAIL_HOSTNAME" = "" ]; then + EMAIL_HOSTNAME=`hostname` + EMAIL_HOSTNAME="noreplay@${EMAIL_HOSTNAME}.ru" +fi + +if [ "$EMAIL_SEND" = "" ]; then + EMAIL_SEND="yes" +fi + +if [ "$BACKUP_THREADS" = "" ]; then + BACKUP_THREADS=4 +fi + +if [ "$BACKUP_PATH" = "" ]; then + BACKUP_PATH="/mnt/pgbak" +fi + +if [ "$PGUSER" = "" ]; then + PGUSER=postgres +fi + +if [ "$AMCHECK" = "" ]; then + AMCHECK="false" +fi + +if [ "$HEAPALLINDEXED" = "" ]; then + HEAPALLINDEXED="false" +fi + +if [ "$1" = "amcheck" ]; then + AMCHECK="true" +fi +if [ "$1" = "--amcheck" ]; then + AMCHECK="true" +fi + +if [ "$2" = "heapallindexed" ]; then + HEAPALLINDEXED="true" +fi +if 
[ "$2" = "--heapallindexed" ]; then + HEAPALLINDEXED="true" +fi + +function send_email() +{ + # отправляем письмо о выполнении скрипта, в параметре $1 указывается поясняющий текст, а в $2 заголовок письма + # send mail to DBA + if [ "$EMAIL_SEND" = "yes" ]; then + curr_log=`psql -Xtq -c 'select pg_current_logfile()'` + db_name=`psql -Xtq -f /usr/local/bin/first_db.sql` + # прикрепляем к письму последние несколько строк из log файла + echo "" >> $REPORT_PATH/${curr_date}_check_cluster.txt + echo '-- ===================== данные из лог файла postgresql ========================= --' >> $REPORT_PATH/${curr_date}_check_cluster.txt + tail -n 100 $curr_log >> $REPORT_PATH/${curr_date}_check_cluster.txt + # отправляем письмо + (echo "
 $1 
" ; cat $REPORT_PATH/${curr_date}_check_cluster.txt ; echo '
';) | sendEmail -o tls=no -o message-content-type=html -o message-charset=utf-8 -f "$EMAIL_HOSTNAME" -t $EMAILTO -s $EMAIL_SERVER -u "$2 ($db_name)" + else + cat $REPORT_PATH/${curr_date}_check_cluster.txt + fi +} + +# check paths... +PGLOG=/var/log/postgresql +SCRIPT_PATH=/var/lib/postgresql +REPORT_PATH=$PGLOG/report + +mkdir -p $REPORT_PATH + +cd $REPORT_PATH + +curr_date=`eval date +%F` +echo `date +%T` 'Старт checkdb проверки' > $REPORT_PATH/${curr_date}_check_cluster.txt + +# дополнительные опции проверки +ADDOPTIONS="" +if [ $AMCHECK = "true" ] ; then + ADDOPTIONS="--amcheck" +fi +if [ $HEAPALLINDEXED = "true" ] ; then + ADDOPTIONS="--amcheck --heapallindexed" +fi +echo "Режим проверки: checkdb $ADDOPTIONS" >> $REPORT_PATH/${curr_date}_check_cluster.txt +# запускаем общую проверку... +/usr/bin/pg_probackup-$PG_MAJOR checkdb $ADDOPTIONS --threads=$BACKUP_THREADS -D $PGDATA -d postgres -w -h ${PGHOST:-127.0.0.1} -p ${PGPORT:-5432} >> $REPORT_PATH/${curr_date}_check_cluster.txt 2>&1 + +######### + +echo `date +%T` 'Проверка checkdb завершена' >> $REPORT_PATH/${curr_date}_check_cluster.txt + +# проверяем наличие файла результатов проверки checkdb + +# выполняем проверки лога checkdb +if grep -e "invalid file size" $REPORT_PATH/${curr_date}_check_cluster.txt; then + # если есть сообщение "invalid file size" - прерываем скрипт, как минимум один файл имеет некорректный размер + send_email "обнаружен поврежденный файл с некорректным размером!" "Check cluster Failed" + exit 1 +fi + +if grep -e "page verification failed" $REPORT_PATH/${curr_date}_check_cluster.txt; then + # если есть сообщение "page verification failed" - прерываем скрипт, как минимум один файл имеет некорректную чексумму + send_email "обнаружен поврежденный файл с некорректной чексуммой!" 
"Check cluster Failed" + exit 1 +fi + +# формируем счетчики ошибок которые можно игнорировать (не B-Tree индексы, итоговое сообщение об ошибке из-за этих индексов) (возможно потом нужно будет дополнить) +# считаем количество ошибок из-за не B-Tree индексов +ERROR_NOT_BTREE_INDEX=$(grep -i -e "Amcheck failed.*ERROR.*only B-Tree indexes" $REPORT_PATH/${curr_date}_check_cluster.txt | wc -l) + +# считаем финальное сообщение об ошибке из-за невалидности индексов (может указывать на наличие не B-Tree индексов) +ERROR_FINAL_VALID_INDEX_CHECK=$(grep -i -e "ERROR.*Not all checked indexes are valid" $REPORT_PATH/${curr_date}_check_cluster.txt | wc -l) + +# сообщение об ошибке которое бывает при проверке через --amcheck +ERROR_FINAL_AMCHECK=$(grep -i -e "ERROR: Some databases were not amchecked." $REPORT_PATH/${curr_date}_check_cluster.txt | wc -l) + +# суммируем ошибки которые можно игнорировать +ERROR_FOR_IGNOR=$(($ERROR_NOT_BTREE_INDEX + $ERROR_FINAL_VALID_INDEX_CHECK + $ERROR_FINAL_AMCHECK)) + +# формируем общий счетчик всех ошибок в логе checkdb +ERROR_ALL=$(grep -i -e "ERROR" $REPORT_PATH/${curr_date}_check_cluster.txt | wc -l) + +if (($ERROR_ALL > $ERROR_FOR_IGNOR)); then + # общее число ERROR больше чем тех что можно игнорировать, значит checkdb выявил проблемы, прерываем работу скрипта + send_email "Выявлены проблемы при проверке целостности бд, необходим анализ причины!" "Check cluster Failed" + exit 1 +fi + +echo `date +%T` 'проверка бд checkdb '$ADDOPTIONS' завершена' >> $REPORT_PATH/${curr_date}_check_cluster.txt + +######## + +echo `date +%T` 'Выполняем проверку бд при помощи pg_catcheck...' >> $REPORT_PATH/${curr_date}_check_cluster.txt + +cd ~postgres +echo `date +%T` 'Старт проверки pg_catcheck...' 
>> $REPORT_PATH/${curr_date}_check_cluster.txt + +# получаем список баз данных в кластере +dblist=`psql -Xtq -c "select string_agg(datname, ' ') from pg_database where not datistemplate;"` + +echo -e "Список баз для проверки:\n$dblist" >> $REPORT_PATH/${curr_date}_check_cluster.txt + +# выполняем проверку каждой базы данных кластера при помощи pg_catcheck +for db in $dblist +do + echo "" >> $REPORT_PATH/${curr_date}_check_cluster.txt + echo "Check database: $db" >> $REPORT_PATH/${curr_date}_check_cluster.txt + if ! /usr/lib/postgresql/$PG_MAJOR/bin/pg_catcheck --postgresql --select-from-relations $db >> $REPORT_PATH/${curr_date}_check_cluster.txt 2>&1 ; then + send_email "Найдены ошибки в бд $db при проверке pg_catcheck!" "$REPORT_PATH/${curr_date}_check_cluster.txt" + exit 1 + fi +done + +echo `date +%T` 'Проверка pg_catcheck завершена' >> $REPORT_PATH/${curr_date}_check_cluster.txt +######## + +# отправляем письмо о корректном завершении работ +send_email "Результаты проверки кластера:" "Check cluster OK" diff --git a/16/docker-pgprocheckdb/show.sh b/16/docker-pgprocheckdb/show.sh new file mode 100644 index 0000000..f97646d --- /dev/null +++ b/16/docker-pgprocheckdb/show.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# $1 - yes/no (the sign for send to email, yes - default) +# $2 - list of email recipients (separated by a space) + +if [ "$EMAILTO" = "" ]; then + EMAILTO="DBA-PostgreSQL@company.ru" +fi + +if [ "$EMAIL_SERVER" = "" ]; then + EMAIL_SERVER=mail.company.ru +fi + +if [ "$EMAIL_HOSTNAME" = "" ]; then + EMAIL_HOSTNAME=`hostname` + EMAIL_HOSTNAME="noreplay@${EMAIL_HOSTNAME}.ru" +fi + +if [ "$EMAIL_SEND" = "" ]; then + EMAIL_SEND="yes" +fi + +if [ "$BACKUP_PATH" = "" ]; then + BACKUP_PATH="/mnt/pgbak" +fi + +if [ "$1" != "" ]; then + EMAIL_SEND=$1 +fi + +if [ "$2" != "" ]; then + EMAILTO="$2" +fi + +cd $BACKUP_PATH + +# send mail to DBA +/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt +/usr/bin/pg_probackup-$PG_MAJOR show 
--backup-path=$BACKUP_PATH --archive >> ~postgres/backups.txt + +echo "" >> ~postgres/backups.txt +echo "Место на бэкапном устройстве:" >> ~postgres/backups.txt +df -h $BACKUP_PATH >> ~postgres/backups.txt + +ERRORS_COUNT=`grep -c ERROR ~postgres/backups.txt` +EMAIL_SUBJECT="" +if [[ "$ERRORS_COUNT" -ne "0" ]] ; then + EMAIL_SUBJECT="Report backups error" +else + EMAIL_SUBJECT="Report backups" +fi + +cat ~postgres/backups.txt +if [ "$EMAIL_SEND" = "yes" ]; then + (echo 'List of all cluster backups:
' ; cat ~postgres/backups.txt ; echo '
';) | sendEmail -o tls=no -o message-content-type=html -o message-charset=utf-8 -f "$EMAIL_HOSTNAME" -t $EMAILTO -s $EMAIL_SERVER -u $EMAIL_SUBJECT +fi diff --git a/16/docker-pgprocheckdb/sql/first_db.sql b/16/docker-pgprocheckdb/sql/first_db.sql new file mode 100644 index 0000000..496d4d9 --- /dev/null +++ b/16/docker-pgprocheckdb/sql/first_db.sql @@ -0,0 +1 @@ +select datname from pg_database where not datistemplate and datname not in ('postgres','mamonsu') limit 1; diff --git a/16/docker-pgprorestore/Dockerfile b/16/docker-pgprorestore/Dockerfile new file mode 100644 index 0000000..59c9872 --- /dev/null +++ b/16/docker-pgprorestore/Dockerfile @@ -0,0 +1,47 @@ +# Based on: +# https://hub.docker.com/_/debian +# +FROM debian:bookworm-slim + +LABEL maintainer="Sergey Grinko " + +ENV DEBIAN_RELEASE bookworm +ENV PG_MAJOR 16 +ENV PGDATA /var/lib/postgresql/data +ENV BACKUP_PATH /mnt/pgbak + +# explicitly set user/group IDs +RUN set -eux; \ + groupadd -r postgres --gid=999; \ + useradd -r -g postgres --uid=999 --home-dir=/var/lib/postgresql --shell=/bin/bash postgres; \ + mkdir -p /var/lib/postgresql/data; \ + chown -R postgres:postgres /var/lib/postgresql + +RUN apt-get update \ + && apt-get install -y --no-install-recommends ca-certificates wget gnupg sendemail jq \ + # подключаем репозитарий сообщества PostgreSQL + && echo "deb http://apt.postgresql.org/pub/repos/apt ${DEBIAN_RELEASE}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ + && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ + # подключаем репозитарий архивной утилиты + && echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $DEBIAN_RELEASE main-$DEBIAN_RELEASE" > /etc/apt/sources.list.d/pg_probackup.list \ + && wget -qO - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG-PROBACKUP | tee /etc/apt/trusted.gpg.d/pg_probackup.asc \ + # ... updating ... 
+ && apt-get update \ + && apt-get install -y --no-install-recommends \ + pg-probackup-$PG_MAJOR \ + && mkdir -p $BACKUP_PATH \ + && chown -R postgres:postgres $BACKUP_PATH \ + # ... cleaning ... + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get -f install \ + && apt-get -y autoremove \ + && apt-get -y clean \ + && apt-get -y autoclean \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /var/cache/apt/* + +COPY ./restore.sh /usr/local/bin +COPY ./show.sh /usr/local/bin + +RUN chmod +x /usr/local/bin/*.sh + +ENTRYPOINT [ "/usr/local/bin/restore.sh" ] diff --git a/16/docker-pgprorestore/restore.sh b/16/docker-pgprorestore/restore.sh new file mode 100644 index 0000000..bc1cbb5 --- /dev/null +++ b/16/docker-pgprorestore/restore.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# +# $TARGET_TIME - the time for which you need to recover, if empty, it will be according to the state of the backup itself +# if time is specified, then $TARGET_ID is ignored +# $TARGET_ID - backup label from command output pg_probackup show +# $BACKUP_THREADS - the count threads: 4 (default) or this number +# + +if [ "$BACKUP_THREADS" = "" ]; then + BACKUP_THREADS=4 +fi + +#if [ "$TARGET_TIME" = "" ]; then +# if [ "$TARGET_ID" = "" ]; then +# # set restore time to current (default) +# TARGET_TIME=`date +"%F %T"` +# fi +#fi + +if [ "$BACKUP_PATH" = "" ]; then + BACKUP_PATH="/mnt/pgbak" +fi + +cd $BACKUP_PATH + +rm -rf $PGDATA/* +mkdir -p $PGDATA +chmod go-rwx $PGDATA +chown -R postgres:postgres $PGDATA + +echo ================================ +echo 'start restore server ...' 
+echo ================================ + +if [ -n "$TARGET_ID" ] ; then + su - postgres -c "TZ=$TZ /usr/bin/pg_probackup-$PG_MAJOR restore --recovery-target-action=promote --skip-block-validation --no-validate --threads=$BACKUP_THREADS --backup-path=$BACKUP_PATH --instance=$PG_MAJOR -D $PGDATA --recovery-target=immediate -i $TARGET_ID" + echo "====================================" + echo "restore to ID: $TARGET_ID" +elif [ -n "$TARGET_TIME" ] ; then + su - postgres -c "TZ=$TZ /usr/bin/pg_probackup-$PG_MAJOR restore --recovery-target-action=promote --skip-block-validation --no-validate --threads=$BACKUP_THREADS --backup-path=$BACKUP_PATH --instance=$PG_MAJOR -D $PGDATA --recovery-target-time=\"$TARGET_TIME\"" + echo "====================================" + echo "restore to time: $TARGET_TIME" +else + su - postgres -c "TZ=$TZ /usr/bin/pg_probackup-$PG_MAJOR restore --recovery-target-action=promote --skip-block-validation --no-validate --threads=$BACKUP_THREADS --backup-path=$BACKUP_PATH --instance=$PG_MAJOR -D $PGDATA --recovery-target=latest" + echo "====================================" + echo "restore to latest" +fi +echo "====================================" diff --git a/16/docker-pgprorestore/show.sh b/16/docker-pgprorestore/show.sh new file mode 100644 index 0000000..cd46963 --- /dev/null +++ b/16/docker-pgprorestore/show.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# $1 - yes/no (the sign for send to email, yes - default) +# $2 - list of email recipients (separated by a space) + +if [ "$EMAILTO" = "" ]; then + EMAILTO="DBA-PostgreSQL@company.ru" +fi + +if [ "$EMAIL_SERVER" = "" ]; then + EMAIL_SERVER=mail.company.ru +fi + +if [ "$EMAIL_HOSTNAME" = "" ]; then + EMAIL_HOSTNAME=`hostname` + EMAIL_HOSTNAME="noreplay@${EMAIL_HOSTNAME}.ru" +fi + +if [ "$EMAIL_SEND" = "" ]; then + EMAIL_SEND="yes" +fi + +if [ "$1" != "" ]; then + EMAIL_SEND=$1 +fi + +if [ "$2" != "" ]; then + EMAILTO="$2" +fi + +if [ "$BACKUP_PATH" = "" ]; then + BACKUP_PATH="/mnt/pgbak" +fi + +cd 
$BACKUP_PATH + +# send mail to DBA +su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt" +su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --archive >> ~postgres/backups.txt" + +echo "" >> ~postgres/backups.txt +echo "Место на бэкапном устройстве:" >> ~postgres/backups.txt +df -h $BACKUP_PATH >> ~postgres/backups.txt + +ERRORS_COUNT=`su - postgres -c "grep -c ERROR ~postgres/backups.txt"` +EMAIL_SUBJECT="" +if [[ "$ERRORS_COUNT" -ne "0" ]] ; then + EMAIL_SUBJECT="Report backups error" +else + EMAIL_SUBJECT="Report backups" +fi + +cat ~postgres/backups.txt +if [ "$EMAIL_SEND" = "yes" ]; then + (echo 'List of all cluster backups:
' ; cat ~postgres/backups.txt ; echo '
';) | sendEmail -o tls=no -o message-content-type=html -o message-charset=utf-8 -f "$EMAIL_HOSTNAME" -t $EMAILTO -s $EMAIL_SERVER -u $EMAIL_SUBJECT +fi diff --git a/16/docker-pgupgrade/Dockerfile b/16/docker-pgupgrade/Dockerfile new file mode 100644 index 0000000..6081635 --- /dev/null +++ b/16/docker-pgupgrade/Dockerfile @@ -0,0 +1,196 @@ +# Based on: +# https://hub.docker.com/_/postgres +# https://github.com/docker-library/postgres +# https://hub.docker.com/r/postgis/postgis +# https://github.com/postgis/docker-postgis +# +FROM postgres:16.2 +LABEL maintainer="Sergey Grinko " + +ENV PG_MAJOR_OLD 15 + +ENV PGBINOLD /usr/lib/postgresql/$PG_MAJOR_OLD/bin +ENV PGBINNEW /usr/lib/postgresql/$PG_MAJOR/bin + +ENV PGDATAOLD /var/lib/postgresql/$PG_MAJOR_OLD/data +ENV PGDATANEW /var/lib/postgresql/$PG_MAJOR/data + +ENV TSEARCHDATAOLD /usr/share/postgresql/$PG_MAJOR_OLD/tsearch_data +ENV TSEARCHDATANEW /usr/share/postgresql/$PG_MAJOR/tsearch_data + +# режим копирования каталога PGDATA: HARDLINK, AUTO, COPY +ENV PGDATACOPY_MODE HARDLINK + +ENV LANG ru_RU.utf8 + +ENV POSTGIS_MAJOR 3 +ENV DEBIAN_RELEASE bookworm +ENV BACKUP_PATH /mnt/pgbak +ENV POSTGRES_INITDB_ARGS "--locale=ru_RU.UTF8 --data-checksums" +ENV RUM_VERSION 1.3.13 + +RUN echo ru_RU.UTF-8 UTF-8 >> /etc/locale.gen; locale-gen \ + && apt-get update \ + && apt-get install -y --no-install-recommends ca-certificates jq wget freetds-dev freetds-common git make gcc postgresql-server-dev-$PG_MAJOR libicu-dev sendemail htop mc systemtap-sdt-dev vim \ + && apt-cache showpkg postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR \ + && apt-get install -y --no-install-recommends \ +# ставим пакеты для предыдущей версии + postgresql-$PG_MAJOR_OLD \ + postgresql-server-dev-$PG_MAJOR_OLD \ + postgresql-$PG_MAJOR_OLD-postgis-$POSTGIS_MAJOR \ + postgresql-$PG_MAJOR_OLD-postgis-$POSTGIS_MAJOR-scripts \ + postgresql-plpython3-$PG_MAJOR_OLD \ + postgresql-$PG_MAJOR_OLD-repack \ + postgresql-$PG_MAJOR_OLD-pldebugger \ + 
postgresql-$PG_MAJOR_OLD-plpgsql-check \ + postgresql-$PG_MAJOR_OLD-tds-fdw \ + postgresql-$PG_MAJOR_OLD-cron \ + postgresql-$PG_MAJOR_OLD-rum \ +# ставим пакеты для новой версии + postgresql-server-dev-$PG_MAJOR \ + postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR \ + postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR-scripts \ + postgresql-plpython3-$PG_MAJOR \ + postgresql-$PG_MAJOR-repack \ + postgresql-$PG_MAJOR-pldebugger \ + postgresql-$PG_MAJOR-plpgsql-check \ + postgresql-$PG_MAJOR-tds-fdw \ + libkrb5-dev \ + && sed -i "s/;\ttext size = 64512/\ttext size = 1262485504/g" /etc/freetds/freetds.conf \ + && git config --global http.sslverify false \ + && mkdir -p /tmp/build_ext \ + && cd /tmp/build_ext \ + && rm -rf /tmp/build_ext/* \ +# ====== hunspell_dicts + && git clone https://github.com/postgrespro/hunspell_dicts \ + && cd hunspell_dicts \ + && cd hunspell_en_us \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ + && cd ../hunspell_ru_ru \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ + && cd ../hunspell_ru_ru_aot \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ +# ====== pg_tsparser + && cd /tmp/build_ext \ + && git clone https://github.com/postgrespro/pg_tsparser \ + && cd pg_tsparser \ + && make USE_PGXS=1 install \ +# ====== shared_ispell + && cd /tmp/build_ext \ + && git clone https://github.com/postgrespro/shared_ispell \ + && cd shared_ispell \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ +# ====== pg_variables + && cd /tmp/build_ext \ + && git clone https://github.com/xinferum/pg_variables \ + && cd pg_variables \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ +# ====== rum + && cd /tmp/build_ext \ + && git clone https://github.com/postgrespro/rum --branch $RUM_VERSION --single-branch \ + && cd rum \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ +# ====== pg_cron + && cd /tmp/build_ext \ + && git clone https://github.com/citusdata/pg_cron \ + && cd pg_cron \ + && make \ + && make install \ +# ====== pg_dbo_timestamp 
+ && cd /tmp/build_ext \ + && git clone https://github.com/pgcodekeeper/pg_dbo_timestamp \ + && cd pg_dbo_timestamp \ + && make USE_PGXS=1 install \ +# ====== pg_background + && cd /tmp/build_ext \ + && git clone https://github.com/vibhorkum/pg_background \ + && cd pg_background \ + && make \ + && make install \ +# ====== create backup path ... + && mkdir -p $BACKUP_PATH \ + && chmod 0777 $BACKUP_PATH \ + && chmod 0777 /var/log/postgresql \ + && chown postgres:postgres $BACKUP_PATH /var/log/postgresql \ + # ====== make files on folder tsearch_data for mapping ... + && cd /usr/share/postgresql/$PG_MAJOR/tsearch_data \ + && tar -czf /usr/share/postgresql/$PG_MAJOR/tsearch_data.tar.gz . \ + && cd / \ + && ln -s /usr/share/postgresql/$PG_MAJOR/tsearch_data /usr/share/postgresql/ \ +# +# для предыдущей версии +# + && export PATH=$PGBINOLD:$PATH \ + && mkdir -p /tmp/build_ext \ + && cd /tmp/build_ext \ + && rm -rf /tmp/build_ext/* \ +# ====== hunspell_dicts + && git clone https://github.com/postgrespro/hunspell_dicts \ + && cd hunspell_dicts \ + && cd hunspell_en_us \ + && make USE_PGXS=1 \ + && PATH=$PATH make USE_PGXS=1 install \ + && cd ../hunspell_ru_ru \ + && make USE_PGXS=1 \ + && PATH=$PATH make USE_PGXS=1 install \ + && cd ../hunspell_ru_ru_aot \ + && make USE_PGXS=1 \ + && PATH=$PATH make USE_PGXS=1 install \ +# ====== pg_tsparser + && cd /tmp/build_ext \ + && git clone https://github.com/postgrespro/pg_tsparser \ + && cd pg_tsparser \ + && PATH=$PATH make USE_PGXS=1 install \ +# ====== shared_ispell + && cd /tmp/build_ext \ + && git clone https://github.com/postgrespro/shared_ispell \ + && cd shared_ispell \ + && make USE_PGXS=1 \ + && PATH=$PATH make USE_PGXS=1 install \ +# ====== pg_dbo_timestamp + && cd /tmp/build_ext \ + && git clone https://github.com/pgcodekeeper/pg_dbo_timestamp \ + && cd pg_dbo_timestamp \ + && PATH=$PATH make USE_PGXS=1 install \ +# ====== pg_background + && cd /tmp/build_ext \ + && git clone https://github.com/vibhorkum/pg_background 
\ + && cd pg_background \ + && make \ + && PATH=$PATH make install \ +# ====== pg_variables + && cd /tmp/build_ext \ + && git clone https://github.com/xinferum/pg_variables \ + && cd pg_variables \ + && make USE_PGXS=1 \ + && PATH=$PATH make USE_PGXS=1 install \ +# ====== clean all unused package... + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get -f install \ + && apt-get -y autoremove \ + && apt-get -y clean \ + && apt-get -y autoclean \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /var/cache/apt/* + +COPY ./upgrade.sh /usr/local/bin +# copy postgres files +COPY ./pg_hba.conf /usr/local/bin +COPY ./pg_ident.conf /usr/local/bin +COPY ./postgresql.conf /usr/local/bin + +RUN chmod +x /usr/local/bin/*.sh \ + && chown postgres:postgres /usr/local/bin/pg_hba.conf /usr/local/bin/pg_ident.conf /usr/local/bin/postgresql.conf + + +RUN mkdir -p "$PGDATAOLD" "$PGDATANEW" "$TSEARCHDATAOLD" "$TSEARCHDATANEW" \ + && chown -R postgres:postgres /var/lib/postgresql "$PGDATAOLD" "$PGDATANEW" + +WORKDIR /var/lib/postgresql + +ENTRYPOINT ["upgrade.sh"] diff --git a/16/docker-pgupgrade/pg_hba.conf b/16/docker-pgupgrade/pg_hba.conf new file mode 100644 index 0000000..bd5f6c0 --- /dev/null +++ b/16/docker-pgupgrade/pg_hba.conf @@ -0,0 +1,102 @@ +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. 
Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) +# +# The first field is the connection type: "local" is a Unix-domain +# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, +# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a +# non-SSL TCP/IP socket. Similarly, "hostgssenc" uses a +# GSSAPI-encrypted TCP/IP socket, while "hostnogssenc" uses a +# non-GSSAPI socket. +# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, or a comma-separated list thereof. The "all" +# keyword does not match "replication". Access to replication +# must be enabled in a separate record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", or a +# comma-separated list thereof. In both the DATABASE and USER fields +# you can also write a file name prefixed with "@" to include names +# from a separate file. +# +# ADDRESS specifies the set of hosts the record matches. It can be a +# host name, or it is made up of an IP address and a CIDR mask that is +# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that +# specifies the number of significant bits in the mask. A host name +# that starts with a dot (.) matches a suffix of the actual host name. +# Alternatively, you can write an IP address and netmask in separate +# columns to specify the set of hosts. Instead of a CIDR-address, you +# can write "samehost" to match any of the server's own IP addresses, +# or "samenet" to match any address in any subnet that the server is +# directly connected to. 
+# +# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", +# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert". +# Note that "password" sends passwords in clear text; "md5" or +# "scram-sha-256" are preferred since they send encrypted passwords. +# +# OPTIONS are a set of options for the authentication in the format +# NAME=VALUE. The available options depend on the different +# authentication methods -- refer to the "Client Authentication" +# section in the documentation for a list of which options are +# available for which authentication methods. +# +# Database and user names containing spaces, commas, quotes and other +# special characters must be quoted. Quoting one of the keywords +# "all", "sameuser", "samerole" or "replication" makes the name lose +# its special character, and just match a database or username with +# that name. +# +# This file is read on server startup and when the server receives a +# SIGHUP signal. If you edit the file on a running system, you have to +# SIGHUP the server for the changes to take effect, run "pg_ctl reload", +# or execute "SELECT pg_reload_conf()". +# +# Put your actual configuration here +# ---------------------------------- +# +# If you want to allow non-local connections, you need to add more +# "host" records. In that case you will also need to make PostgreSQL +# listen on a non-local interface via the listen_addresses +# configuration parameter, or via the -i or -h command line switches. + +# CAUTION: Configuring the system for local "trust" authentication +# allows any local user to connect as any PostgreSQL user, including +# the database superuser. If you do not trust all your local users, +# use another authentication method. 
+ + +# TYPE DATABASE USER ADDRESS METHOD + +# "local" is for Unix domain socket connections only +local all all peer + +# IPv4 local connections: +host all all 127.0.0.1/32 trust +host all all localhost trust + +# IPv6 local connections: +host all all ::1/128 trust + +# Allow replication connections from localhost, by a user with the +# replication privilege. +local replication all trust +host replication all all trust + +host all all all md5 diff --git a/16/docker-pgupgrade/pg_ident.conf b/16/docker-pgupgrade/pg_ident.conf new file mode 100644 index 0000000..2a21033 --- /dev/null +++ b/16/docker-pgupgrade/pg_ident.conf @@ -0,0 +1,43 @@ +# PostgreSQL User Name Maps +# ========================= +# +# Refer to the PostgreSQL documentation, chapter "Client +# Authentication" for a complete description. A short synopsis +# follows. +# +# This file controls PostgreSQL user name mapping. It maps external +# user names to their corresponding PostgreSQL user names. Records +# are of the form: +# +# MAPNAME SYSTEM-USERNAME PG-USERNAME +# +# (The uppercase quantities must be replaced by actual values.) +# +# MAPNAME is the (otherwise freely chosen) map name that was used in +# pg_hba.conf. SYSTEM-USERNAME is the detected user name of the +# client. PG-USERNAME is the requested PostgreSQL user name. The +# existence of a record specifies that SYSTEM-USERNAME may connect as +# PG-USERNAME. +# +# If SYSTEM-USERNAME starts with a slash (/), it will be treated as a +# regular expression. Optionally this can contain a capture (a +# parenthesized subexpression). The substring matching the capture +# will be substituted for \1 (backslash-one) if present in +# PG-USERNAME. +# +# Multiple maps may be specified in this file and used by pg_hba.conf. +# +# No map names are defined in the default configuration. If all +# system user names and PostgreSQL user names are the same, you don't +# need anything in this file. 
+# +# This file is read on server startup and when the postmaster receives +# a SIGHUP signal. If you edit the file on a running system, you have +# to SIGHUP the postmaster for the changes to take effect. You can +# use "pg_ctl reload" to do that. + +# Put your actual configuration here +# ---------------------------------- + +# MAPNAME SYSTEM-USERNAME PG-USERNAME +main postgres backup \ No newline at end of file diff --git a/16/docker-pgupgrade/postgresql.conf b/16/docker-pgupgrade/postgresql.conf new file mode 100644 index 0000000..e1e427f --- /dev/null +++ b/16/docker-pgupgrade/postgresql.conf @@ -0,0 +1,861 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. 
+# +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. +#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' # what IP address(es) to listen on; + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +port = 5432 # (change requires restart) +max_connections = 140 # (change requires restart) +#reserved_connections = 0 # (change requires restart) +superuser_reserved_connections = 5 # (change requires restart) +unix_socket_directories = '/var/run/postgresql, /tmp' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the 
computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +#client_connection_check_interval = 0 # time between checks for client + # disconnection while running queries; + # 0 for never + +# - Authentication - + +#authentication_timeout = 1min # 1s-600s +password_encryption = md5 # scram-sha-256 or md5 +#scram_iterations = 4096 +#db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off +#gss_accept_delegation = off + +# - SSL - + +#ssl = on +#ssl_ca_file = '/var/lib/pgsql/certs/kscl.crt' +#ssl_cert_file = '/var/lib/pgsql/certs/psql.crt' +#ssl_crl_file = '' +#ssl_crl_dir = '' +#ssl_key_file = '/var/lib/pgsql/certs/psql.key' +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +#ssl_prefer_server_ciphers = on +#ssl_ecdh_curve = 'prime256v1' +#ssl_min_protocol_version = 'TLSv1.2' +#ssl_max_protocol_version = '' +#ssl_dh_params_file = '' +#ssl_passphrase_command = '' +#ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 256MB # min 128kB + # (change requires restart) +huge_pages = try # on, off, or try + # (change requires restart) +#huge_page_size = 0 # zero for system default + # (change requires restart) +temp_buffers = 32MB # min 800kB +max_prepared_transactions = 200 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set 
max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. +work_mem = 20MB # min 64kB +#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem +maintenance_work_mem = 256MB # min 1MB +autovacuum_work_mem = 128MB # min 1MB, or -1 to use maintenance_work_mem +logical_decoding_work_mem = 256MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +dynamic_shared_memory_type = posix # the default is usually the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) +min_dynamic_shared_memory = 128MB # (change requires restart) +#vacuum_buffer_usage_limit = 256kB # size of vacuum and analyze buffer access strategy ring; + # 0 to disable vacuum buffer access strategy; + # range 128kB to 16GB + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +max_files_per_process = 4096 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +vacuum_cost_delay = 1 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +vacuum_cost_page_miss = 2 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +vacuum_cost_limit = 10000 # 1-10000 credits + +# - Background Writer - + +bgwriter_delay = 10ms # 10-10000ms between rounds +bgwriter_lru_maxpages = 1600 # max buffers written/round, 0 disables +bgwriter_lru_multiplier = 10.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#backend_flush_after = 0 # measured in pages, 0 disables +effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching +maintenance_io_concurrency = 50 # 1-1000; 0 disables prefetching +max_worker_processes = 18 # 
(change requires restart) +max_parallel_workers_per_gather = 2 # taken from max_parallel_workers +max_parallel_maintenance_workers = 2 # taken from max_parallel_workers +max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel operations +parallel_leader_participation = on +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +wal_level = replica # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +synchronous_commit = remote_apply # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +full_page_writes = on # recover from partial page writes +wal_log_hints = on # also do full page writes of non-critical updates + # (change requires restart) +wal_compression = on # enables compression of full-page writes; + # off, pglz, lz4, zstd, or on +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +wal_buffers = 64MB # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +checkpoint_timeout = 60min # range 30s-1d +checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in 
pages, 0 disables +#checkpoint_warning = 30s # 0 disables +max_wal_size = 20GB +min_wal_size = 160MB + +# - Prefetching during recovery - + +#recovery_prefetch = try # prefetch pages referenced in the WAL? +#wal_decode_buffer_size = 512kB # lookahead window used for prefetching + # (change requires restart) + +# - Archiving - + +archive_mode = on # enables archiving; off, on, or always + # (change requires restart) +#archive_library = '' # library to use to archive a logfile segment + # (empty string indicates archive_command should + # be used) +archive_command = 'if [ -f archive_pause.trigger ]; then exit 1; else if [ -f archive_active.trigger ]; then pg_probackup-16 archive-push -B /mnt/pgbak --instance 16 --wal-file-path %p --wal-file-name %f -j 2 --batch-size=10; else exit 0; fi; fi' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +archive_timeout = 1800s # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +restore_command = 'if [ -f archive_active.trigger ]; then pg_probackup-16 archive-get -B /mnt/pgbak --instance 16 --wal-file-path %p --wal-file-name %f; else exit 0; fi' # command to use to restore an archived WAL file + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. 
+ +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the primary and on any standby that will send replication data. + +max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +max_replication_slots = 10 # max number of replication slots + # (change requires restart) +wal_keep_size = 4GB # in megabytes; 0 disables +#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Primary Server - + +# These settings are ignored on a standby server. 
+ +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all + +# - Standby Servers - + +# These settings are ignored on a primary server. + +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from primary + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. 
+ +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers +#max_parallel_apply_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_memoize = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +enable_partitionwise_join = on +enable_partitionwise_aggregate = on +#enable_presorted_aggregate = on +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +random_page_cost = 1.1 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +effective_cache_size = 512MB + +jit_above_cost = 800000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +jit_inline_above_cost = 900000 # inline small functions if query is + # more expensive than this; -1 disables +jit_optimize_above_cost = 900000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 
# range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +default_statistics_target = 200 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +from_collapse_limit = 6 +#jit = on # allow JIT compilation +join_collapse_limit = 6 # 1 disables collapsing of explicit + # JOIN clauses +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan +#recursive_worktable_factor = 10.0 # range 0.001-1000000 + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, jsonlog, syslog, and + # eventlog, depending on platform. + # csvlog and jsonlog require + # logging_collector to be on. + +# This is used when logging to stderr: +logging_collector = on # Enable capturing of stderr, jsonlog, + # and csvlog into log files. Required + # to be on for csvlogs and jsonlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +log_directory = '/var/log/postgresql' # directory where log files are written, + # can be absolute or relative to PGDATA +log_filename = 'postgresql-16-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +log_rotation_size = 300MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. 
+log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (Windows): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +log_min_duration_statement = 1000 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +#log_startup_progress_interval = 10s # Time between progress updates for + # long-running startup operations. 
+ # 0 disables the feature, > 0 indicates + # the interval in milliseconds. + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#debug_io_direct = off +#debug_logical_replication_streaming = buffered +#debug_parallel_query = off +log_autovacuum_min_duration = 5000 # log autovacuum activity; + # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. +log_checkpoints = on +#log_connections = on +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +log_line_prefix = '%m %p-%l app=%a,client=%r %q%u@%d, vxid:%v txid:%x %i ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %P = process ID of parallel group leader + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %Q = query ID (0 if none or not computed) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. 
'<%u%%%d> ' +log_lock_waits = on # log lock waits >= deadlock_timeout +#log_recovery_conflict_waits = off # log standby recovery conflict waits + # >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +log_statement = 'ddl' # none, ddl, mod, all +#log_replication_commands = off +log_temp_files = 0 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'Europe/Moscow' + +# - Process Title - + +cluster_name = '16/data' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Cumulative Query and Index Statistics - + +track_activities = on +track_activity_query_size = 16384 # (change requires restart) +track_counts = on +track_io_timing = on +track_wal_io_timing = on +track_functions = all # none, pl, all +#stats_fetch_consistency = cache # cache, none, snapshot + + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. 
+autovacuum_max_workers = 10 # max number of autovacuum subprocesses + # (change requires restart) +autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +autovacuum_vacuum_scale_factor = 0.02 # fraction of table size before vacuum +autovacuum_vacuum_insert_scale_factor = 0.02 # fraction of inserts over table + # size before insert vacuum +autovacuum_analyze_scale_factor = 0.01 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +search_path = ' dbo , public' # schema names +#row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' # a tablespace name, '' uses the default +default_toast_compression = 'lz4' # 'pglz' or 'lz4' +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' 
+#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#idle_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +xmloption = 'document' +#gin_pending_list_limit = 4MB +#createrole_self_grant = '' # set and/or inherit + +# - Locale and Formatting - + +datestyle = 'iso, dmy' +#intervalstyle = 'postgres' +timezone = 'UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 1 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. 
+lc_messages = 'en_US.UTF-8' # locale for system error message + # strings +lc_monetary = 'en_US.UTF-8' # locale for monetary formatting +lc_numeric = 'en_US.UTF-8' # locale for number formatting +lc_time = 'en_US.UTF-8' # locale for time formatting + +#icu_validation_level = warning # report ICU locale validation + # errors at the given level + +# default configuration for text search +default_text_search_config = 'pg_catalog.russian' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' +shared_preload_libraries = 'plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm' # (change requires restart) + +#jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#gin_fuzzy_search_limit = 0 + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +max_locks_per_transaction = 512 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + 
+#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) +#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here +pg_stat_statements.max = 10000 +pg_stat_statements.track = all +pg_stat_statements.track_utility = false +pg_stat_statements.track_planning = false +pg_stat_statements.save = true + +auto_explain.log_min_duration = '15s' +auto_explain.log_nested_statements = true +auto_explain.log_analyze = true +auto_explain.log_verbose = true +auto_explain.log_buffers = true +auto_explain.log_timing = true +auto_explain.log_triggers = true +auto_explain.log_format = 'text' +auto_explain.log_parameter_max_length = -1 + +cron.database_name = 'postgres' +cron.use_background_workers = off +cron.timezone = 'Europe/Moscow' +#cron.max_running_jobs = 32 # Maximum number of jobs that can run concurrently + +# config of the shared memory +shared_ispell.max_size = 70MB + +# config on restore shared blocks +pg_prewarm.autoprewarm = false +pg_prewarm.autoprewarm_interval = 300 + +# Email server for sending letters +adm.email_smtp_server = 'mail.company.ru' + +# проверка кода по умолчанию отключена (включаем только на DEV/TEST) +plpgsql_check.mode = disabled +plpgsql_check.fatal_errors = yes +plpgsql_check.show_nonperformance_warnings = true +plpgsql_check.show_performance_warnings = true +plpgsql_check.profiler = off diff --git a/16/docker-pgupgrade/upgrade.sh b/16/docker-pgupgrade/upgrade.sh new file mode 100644 index 0000000..825d91b --- /dev/null +++ b/16/docker-pgupgrade/upgrade.sh @@ -0,0 +1,287 @@ +#!/bin/bash + + +# Set variable + +HOMEDIR=/var/lib/postgresql +OLD_VERSION=$PG_MAJOR_OLD +NEW_VERSION=$PG_MAJOR +OLD_CONF_FILE=$PGDATAOLD +NEW_CONF_FILE=$PGDATANEW +OLD_BIN=$PGBINOLD/ +NEW_BIN=$PGBINNEW/ +OLD_DB=$PGDATAOLD +NEW_DB=$PGDATANEW +OLD_PGLOG=/var/log/postgresql +NEW_PGLOG=/var/log/postgresql +OLD_TSEARCH=$TSEARCHDATAOLD 
+NEW_TSEARCH=$TSEARCHDATANEW
+
+function parameter_copy()
+{
+    # $1 - имя параметра для копирования из старого конфига в новый
+    OLD_PARAM=`grep "^$1 = " $OLD_CONF_FILE/postgresql.conf | awk -F= '{print $2}' | awk '{print $1}'`
+    NEW_PARAM=`grep "^$1 = " $NEW_CONF_FILE/postgresql.conf | awk -F= '{print $2}' | awk '{print $1}'`
+    if [ "$OLD_PARAM" != "" ]; then
+        # для случая когда в тексте значения параметра есть символ / то экранируем его
+        OLD_PARAM_R=${OLD_PARAM/'/'/'\/'}
+        NEW_PARAM_R=${NEW_PARAM/'/'/'\/'}
+        sed -i.bak "s/$1 = ${NEW_PARAM_R}/$1 = ${OLD_PARAM_R}/g" $NEW_CONF_FILE/postgresql.conf > /dev/null
+        echo "Копируем параметр $1 = $OLD_PARAM"
+    fi
+}
+
+if [ "$(id -u)" = '0' ]; then
+    # запуск от root... исправляем права доступа и перезапускаемся как postgres
+    mkdir -p "$PGDATAOLD" "$PGDATANEW" $TSEARCHDATANEW
+    chmod 700 "$PGDATAOLD" "$PGDATANEW" $TSEARCHDATANEW
+    chown postgres .
+    chown -R postgres "$PGDATAOLD" "$PGDATANEW" $TSEARCHDATANEW
+    exec gosu postgres "$BASH_SOURCE" "$@"
+fi
+
+# проверка входных каталогов на всякий случай....
+# каталог старой версии должен содержать ...
+COUNT_DIR=`ls -l $OLD_DB | grep "^d" | wc -l`
+if [ "$COUNT_DIR" -lt "17" ]; then
+    echo
+    echo "Каталог текущей версии: $OLD_DB"
+    echo "Этот каталог должен указывать на содержимое кластера (ожидается не меньше, чем 17 каталогов)"
+    exit 1
+fi
+if ! [ -f "$OLD_DB/PG_VERSION" ]; then
+    echo
+    echo "Каталог текущей версии: $OLD_DB"
+    echo "Этот каталог должен содержать файл PG_VERSION"
+    exit 1
+fi
+COUNT_DIR=`cat $OLD_DB/PG_VERSION`
+if [ "$COUNT_DIR" != "$PG_MAJOR_OLD" ]; then
+    echo
+    echo "Каталог текущей версии: $OLD_DB"
+    echo "Этот каталог должен принадлежать версии кластера $PG_MAJOR_OLD"
+    echo "Обнаружена версия: $COUNT_DIR"
+    exit 1
+fi
+if [ -f "$OLD_DB/postmaster.pid" ]; then
+    echo
+    echo "Каталог текущей версии: $OLD_DB"
+    echo "Обнаружен файл postmaster.pid !"
+    echo "Текущий кластер завершил работу аварийно или сейчас работает!"
+ echo "Запустите текущий кластер (при необходимости) и завершите его работу штатно." + exit 1 +fi +# каталог новой версии всегда должен быть пуст... +if [ -d $NEW_DB ]; then + COUNT_DIR=`ls -la $NEW_DB | wc -l` + if [ "$COUNT_DIR" != "3" ]; then + echo + echo "Каталог новой версии: $NEW_DB" + echo "Этот каталог должен быть полностью пуст!" + ls -la $NEW_DB + exit 1 + fi +fi + +# преобразуем в верхний регистр +DATACOPY_MODE="${PGDATACOPY_MODE^^}" +# проверяем... +LINK_OPTIONS="" +if [ "$DATACOPY_MODE" != "COPY" ]; then + # тестируем возможность работы с hard link на подключенных томах + # для режима AUTO или HARDLINK + LINK_OPTIONS="--link" + if ! ln "$PGDATAOLD/global/1213" "$PGDATANEW/1213" > /dev/null 2>&1 ; then + # нет возможности делать HardLink между томами, поэтому опцию --link отключаем + LINK_OPTIONS="" + else + rm -f "$PGDATANEW/1213" + fi +fi + +if [ "$DATACOPY_MODE" = "HARDLINK" ]; then + if [ "$LINK_OPTIONS" = "" ]; then + echo + echo '-- Копирование всего кластера запрещено! --' + echo '-- HardLink недоступен! --' + exit 1 + fi +fi + +if [ ! 
-s "$PGDATANEW/PG_VERSION" ]; then + # если каталог новой версии ещё не подготовлен, то создаём его + echo "------------" + echo "-- initdb --" + echo "------------" + PGDATA="$PGDATANEW" eval "initdb $POSTGRES_INITDB_ARGS" + cp -f /usr/local/bin/postgresql.conf $PGDATANEW + cp -f /usr/local/bin/pg_ident.conf $PGDATANEW + cp -f /usr/local/bin/pg_hba.conf $PGDATANEW + # создаём начальную версию каталога FTS + tar -xzkf /usr/share/postgresql/$PG_MAJOR/tsearch_data.tar.gz -C /usr/share/postgresql/tsearch_data/ > /dev/null 2>&1 +fi + +echo +echo "-- =================== версии для обновления =================== --" +echo "Старая: $PG_MAJOR_OLD" +echo "Новая: $PG_MAJOR" +echo '-- ============================================================= --' +echo +echo "-- ================= файлы конфигурации ======================== --" +echo "Старый: $OLD_CONF_FILE/postgresql.conf" +echo "Новый: $NEW_CONF_FILE/postgresql.conf" +echo '-- ============================================================= --' +echo +echo "-- ================= каталоги кластеров ======================== --" +echo "Старый: $OLD_DB" +echo "Новый: $NEW_DB" +echo '-- ============================================================= --' +echo +echo '-- ============================================================= --' +if [ "$LINK_OPTIONS" = "" ] ; then + echo '-- Режим полного копирования кластера! --' + if [ "$DATACOPY_MODE" != "COPY" ]; then + echo '-- HardLink недоступен! --' + fi +else + echo '-- HardLink включен --' +fi +echo '-- ============================================================= --' +echo +echo '-- ============================================================= --' +echo '-- CHECK upgrade --' +echo '-- ============================================================= --' + +cd $HOMEDIR + +echo "Время старта проверки кластера на совместимость ..." 
> $NEW_PGLOG/upgrade_to_${NEW_VERSION}.txt +date >> $NEW_PGLOG/upgrade_to_${NEW_VERSION}.txt + +# для нового сервера берём некоторые параметры такие же как и у прежнего +parameter_copy "max_connections" +parameter_copy "shared_buffers" +parameter_copy "huge_pages" +parameter_copy "work_mem" +parameter_copy "timezone" +parameter_copy "wal_level" +parameter_copy "effective_cache_size" +parameter_copy "maintenance_work_mem" +parameter_copy "autovacuum_work_mem" +parameter_copy "max_prepared_transactions" +parameter_copy "logical_decoding_work_mem" +parameter_copy "max_worker_processes" +parameter_copy "max_parallel_maintenance_workers" +parameter_copy "max_parallel_workers_per_gather" +parameter_copy "max_parallel_workers" +parameter_copy "cron.timezone" +parameter_copy "min_dynamic_shared_memory" +parameter_copy "xmloption" + +echo +echo 'старт check...' +echo +if ! ${NEW_BIN}pg_upgrade --check $LINK_OPTIONS --jobs=4 -d ${OLD_DB} -D ${NEW_DB} -b ${OLD_BIN} -B ${NEW_BIN} \ + -o "-c shared_buffers=100MB" \ + -o "-c shared_preload_libraries='plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm'" \ + -o "-c shared_ispell.max_size=70MB" \ + -o "-c huge_pages=off" \ + -o "-c config_file=$OLD_CONF_FILE/postgresql.conf" \ + -O "-c shared_buffers=100MB" \ + -O "-c shared_preload_libraries='plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm'" \ + -O "-c shared_ispell.max_size=70MB" \ + -O "-c huge_pages=off" \ + -O "-c config_file=$NEW_CONF_FILE/postgresql.conf" \ + -O "-c log_directory=$NEW_PGLOG" \ + -O "-c log_filename=pg_upgrade.log"; then + + echo + echo '-- ============================================================= --' + echo '-- ! Проверка на совместимость кластеров не выполнена ! 
--' + echo '-- ============================================================= --' + echo + + exit 1 +fi + +echo +echo '-- ============================================================= --' +echo '-- Process upgrade! --' +echo '-- ============================================================= --' +echo + +echo "" >> $NEW_PGLOG/upgrade_to_${NEW_VERSION}.txt +echo "Время старта операции upgrade кластера ..." >> $NEW_PGLOG/upgrade_to_${NEW_VERSION}.txt +date >> $NEW_PGLOG/upgrade_to_${NEW_VERSION}.txt + +echo +echo "копируем из исходного сервера в новый сервер файл конфигурации pg_hba.conf" +echo +cp -f $OLD_CONF_FILE/pg_hba.conf $NEW_CONF_FILE/pg_hba.conf + +echo +echo "копируем из исходного сервера в новый сервер файл конфигурации pg_ident.conf" +echo +cp -f $OLD_CONF_FILE/pg_ident.conf $NEW_CONF_FILE/pg_ident.conf + +echo +echo "копируем из исходного сервера в новый сервер все кастомные определения синонимов и тезаурусов" +echo +cd ${OLD_TSEARCH} +# копируем из исходного сервера в новый сервер все кастомные определения синонимов +for fts in $(ls *.syn) +do + if [ "${fts}" != "synonym_sample.syn" ] ; then + cp -f "${OLD_TSEARCH}/${fts}" "${NEW_TSEARCH}/${fts}" + chmod 0644 "${NEW_TSEARCH}/${fts}" + echo "cp -f \"${OLD_TSEARCH}/${fts}\" \"${NEW_TSEARCH}/${fts}\"" >> $NEW_PGLOG/upgrade_to_${NEW_VERSION}.txt + fi +done +# копируем из исходного сервера в новый сервер все кастомные определения тезаурусов +for fts in $(ls *.ths) +do + if [ "${fts}" != "thesaurus_sample.ths" ] ; then + cp -f "${OLD_TSEARCH}/${fts}" "${NEW_TSEARCH}/${fts}" + chmod 0644 "${NEW_TSEARCH}/${fts}" + echo "cp -f \"${OLD_TSEARCH}/${fts}\" \"${NEW_TSEARCH}/${fts}\"" >> $NEW_PGLOG/upgrade_to_${NEW_VERSION}.txt + fi +done + +cd $HOMEDIR + +echo +echo 'старт upgrade...' +echo + +if ! 
${NEW_BIN}pg_upgrade $LINK_OPTIONS --jobs=4 -d ${OLD_DB} -D ${NEW_DB} -b ${OLD_BIN} -B ${NEW_BIN} \ + -o "-c shared_buffers=100MB" \ + -o "-c shared_preload_libraries='plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm'" \ + -o "-c shared_ispell.max_size=70MB" \ + -o "-c huge_pages=off" \ + -o "-c config_file=$OLD_CONF_FILE/postgresql.conf" \ + -O "-c shared_buffers=100MB" \ + -O "-c shared_preload_libraries='plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm'" \ + -O "-c shared_ispell.max_size=70MB" \ + -O "-c huge_pages=off" \ + -O "-c config_file=$NEW_CONF_FILE/postgresql.conf" \ + -O "-c log_directory=$NEW_PGLOG" \ + -O "-c log_filename=pg_upgrade.log"; then + # восстанавливаем pg_control файл для прежнего сервера. + if [ -f ${OLD_DB}/global/pg_control.old ] ; then + echo + echo "Файл pg_control был восстановлен на старом сервере" + mv ${OLD_DB}/global/pg_control.old ${OLD_DB}/global/pg_control + fi + + echo "Ошибки при мажорном обновлении" + exit 1 +fi + +echo "" >> $NEW_PGLOG/upgrade_to_${NEW_VERSION}.txt +echo "Время завершения upgrade кластера ..." >> $NEW_PGLOG/upgrade_to_${NEW_VERSION}.txt +date >> $NEW_PGLOG/upgrade_to_${NEW_VERSION}.txt + +echo "===========================================" +echo "Мажорное обновление выполнено успешно." +echo "Данные старого сервера могут быть удалены." 
+echo "===========================================" + diff --git a/16/docker-postgres/Dockerfile b/16/docker-postgres/Dockerfile new file mode 100644 index 0000000..f960e84 --- /dev/null +++ b/16/docker-postgres/Dockerfile @@ -0,0 +1,160 @@ +# Based on: +# https://hub.docker.com/_/postgres +# https://github.com/docker-library/postgres +# https://hub.docker.com/r/postgis/postgis +# https://github.com/postgis/docker-postgis +# +FROM postgres:16.2 + +LABEL maintainer="Sergey Grinko " + +ENV LANG ru_RU.utf8 + +ENV POSTGIS_MAJOR 3 +ENV DEBIAN_RELEASE bookworm +ENV BACKUP_PATH /mnt/pgbak +ENV POSTGRES_INITDB_ARGS "--locale=ru_RU.UTF8 --data-checksums" +ENV RUM_VERSION 1.3.13 + +RUN echo ru_RU.UTF-8 UTF-8 >> /etc/locale.gen; locale-gen \ + && apt-get update \ + && apt-get install -y --no-install-recommends ca-certificates jq wget freetds-dev freetds-common git make gcc postgresql-server-dev-$PG_MAJOR libicu-dev sendemail htop mc systemtap-sdt-dev vim \ + # подключаем репозитарий архивной утилиты + && echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $DEBIAN_RELEASE main-$DEBIAN_RELEASE" > /etc/apt/sources.list.d/pg_probackup.list \ + && wget -qO - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG-PROBACKUP | tee /etc/apt/trusted.gpg.d/pg_probackup.asc \ + && apt-get update \ + && apt-cache showpkg postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR \ + && apt-get install -y --no-install-recommends \ + postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR \ + postgresql-$PG_MAJOR-postgis-$POSTGIS_MAJOR-scripts \ + pg-probackup-$PG_MAJOR \ + postgresql-plpython3-$PG_MAJOR \ + postgresql-$PG_MAJOR-repack \ + postgresql-$PG_MAJOR-pldebugger \ + postgresql-$PG_MAJOR-plpgsql-check \ + postgresql-$PG_MAJOR-tds-fdw \ + libkrb5-dev \ + && sed -i "s/;\ttext size = 64512/\ttext size = 1262485504/g" /etc/freetds/freetds.conf \ + && git config --global http.sslverify false \ + && mkdir -p /tmp/build_ext \ + && cd /tmp/build_ext \ + && rm -rf /tmp/build_ext/* \ +# ====== 
hunspell_dicts + && git clone https://github.com/postgrespro/hunspell_dicts \ + && cd hunspell_dicts \ + && cd hunspell_en_us \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ + && cd ../hunspell_ru_ru \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ + && cd ../hunspell_ru_ru_aot \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ +# ====== pg_tsparser + && cd /tmp/build_ext \ + && git clone https://github.com/postgrespro/pg_tsparser \ + && cd pg_tsparser \ + && make USE_PGXS=1 install \ +# ====== shared_ispell + && cd /tmp/build_ext \ + && git clone https://github.com/postgrespro/shared_ispell \ + && cd shared_ispell \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ +# ====== pg_variables + && cd /tmp/build_ext \ + && git clone https://github.com/xinferum/pg_variables \ + && cd pg_variables \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ +# ====== rum + && cd /tmp/build_ext \ + && git clone https://github.com/postgrespro/rum --branch $RUM_VERSION --single-branch \ + && cd rum \ + && make USE_PGXS=1 \ + && make USE_PGXS=1 install \ +# ====== pg_cron + && cd /tmp/build_ext \ + && git clone https://github.com/citusdata/pg_cron \ + && cd pg_cron \ + && make \ + && make install \ +# ====== pg_dbo_timestamp + && cd /tmp/build_ext \ + && git clone https://github.com/pgcodekeeper/pg_dbo_timestamp \ + && cd pg_dbo_timestamp \ + && make USE_PGXS=1 install \ +# ====== pg_background + && cd /tmp/build_ext \ + && git clone https://github.com/vibhorkum/pg_background \ + && cd pg_background \ + && make \ + && make install \ +# ====== create backup path ... + && mkdir -p $BACKUP_PATH \ + && chmod 0777 $BACKUP_PATH \ + && chmod 0777 /var/log/postgresql \ + && chown postgres:postgres $BACKUP_PATH /var/log/postgresql \ +# ====== make files on folder tsearch_data for mapping ... + && cd /usr/share/postgresql/$PG_MAJOR/tsearch_data \ + && tar -czf /usr/share/postgresql/$PG_MAJOR/tsearch_data.tar.gz . 
\ + && cd / \ + && ln -s /usr/share/postgresql/$PG_MAJOR/tsearch_data /usr/share/postgresql/ \ +# ====== clean all unused package... + && apt-get purge -y make gcc gcc-12 cpp cpp-12 clang* golang* postgresql-server-dev-$PG_MAJOR *-dev *-man \ + && apt-get -f install \ + && apt-get -y autoremove \ + && apt-get -y clean \ + && apt-get -y autoclean \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /var/cache/apt/* \ + && mkdir -p /docker-entrypoint-initdb.d \ + && mkdir -p /app_db_init_sql \ + && chmod 0777 /app_db_init_sql + +COPY ./locales.conf /etc/locales.conf +# copy bash files +COPY ./initdb-extension.sh /docker-entrypoint-initdb.d/10_extension.sh +COPY ./update-extension.sh /usr/local/bin/ +COPY ./backup.sh /usr/local/bin/ +COPY ./show.sh /usr/local/bin/ +# copy sql files +COPY ./sql/*.sql /usr/local/bin/ +# copy postgres files +COPY ./*.conf /usr/local/bin/ +COPY ./postgres /usr/local/sbin/ + +RUN chmod +x /usr/local/bin/*.sh \ + && chmod +x /usr/local/sbin/postgres \ + && chmod +x /docker-entrypoint-initdb.d/*.sh \ + && chown postgres:postgres /usr/local/bin/pg_hba.conf /usr/local/bin/pg_ident.conf /usr/local/bin/postgresql.conf + +# We set the default STOPSIGNAL to SIGINT, which corresponds to what PostgreSQL +# calls "Fast Shutdown mode" wherein new connections are disallowed and any +# in-progress transactions are aborted, allowing PostgreSQL to stop cleanly and +# flush tables to disk, which is the best compromise available to avoid data +# corruption. +# +# Users who know their applications do not keep open long-lived idle connections +# may way to use a value of SIGTERM instead, which corresponds to "Smart +# Shutdown mode" in which any existing sessions are allowed to finish and the +# server stops when all sessions are terminated. +# +# See https://www.postgresql.org/docs/12/server-shutdown.html for more details +# about available PostgreSQL server shutdown signals. 
+# +# See also https://www.postgresql.org/docs/12/server-start.html for further +# justification of this as the default value, namely that the example (and +# shipped) systemd service files use the "Fast Shutdown mode" for service +# termination. +# +STOPSIGNAL SIGINT +# +# An additional setting that is recommended for all users regardless of this +# value is the runtime "--stop-timeout" (or your orchestrator/runtime's +# equivalent) for controlling how long to wait between sending the defined +# STOPSIGNAL and sending SIGKILL (which is likely to cause data corruption). +# +# The default in most runtimes (such as Docker) is 10 seconds, and the +# documentation at https://www.postgresql.org/docs/12/server-start.html notes +# that even 90 seconds may not be long enough in many instances. diff --git a/16/docker-postgres/backup.sh b/16/docker-postgres/backup.sh new file mode 100644 index 0000000..f82006f --- /dev/null +++ b/16/docker-postgres/backup.sh @@ -0,0 +1,144 @@ +#!/bin/bash + +# $1 - the type mode backup: delta or page (default) or full (create full backup) +# $2 - the sign stream wal mode backup: "yes" or "stream" (default) or other to sign "archive" +# $3 - the count threads: 4 (default) or this number + +# calculate day week +DOW=$(date +%u) + +# Processing external variables + +if [ "$EMAILTO" = "" ]; then + EMAILTO="DBA-PostgreSQL@company.ru" +fi + +if [ "$EMAIL_SERVER" = "" ]; then + EMAIL_SERVER=mail.company.ru +fi + +if [ "$EMAIL_HOSTNAME" = "" ]; then + EMAIL_HOSTNAME=`hostname` + EMAIL_HOSTNAME="noreplay@${EMAIL_HOSTNAME}.ru" +fi + +if [ "$EMAIL_SEND" = "" ]; then + EMAIL_SEND="yes" +fi + +if [ "$BACKUP_THREADS" = "" ]; then + BACKUP_THREADS=4 +fi + +if [ "$BACKUP_STREAM" = "" ]; then + BACKUP_STREAM="stream" +fi +if [[ "$BACKUP_STREAM" = "yes" || "$BACKUP_STREAM" = "stream" ]]; then + BACKUP_STREAM="--stream" +else + BACKUP_STREAM="" +fi + +if [ "$BACKUP_PATH" = "" ]; then + BACKUP_PATH="/mnt/pgbak" +fi + +# Processing external parameters. Priority! 
+ +if [ "$DOW" = "6" ] ; then + # make a full backup once a week (Saturday) + BACKUPMODE=full +else + # make an incremental backup on other days of the week + BACKUPMODE=page +fi +if [ "$BACKUP_MODE" != "" ]; then + # The backup creation mode is given forcibly + BACKUPMODE=$BACKUP_MODE +fi + +BACKUP_STREAM="--stream" +if [ "$2" != "" ]; then + if [[ "$2" = "stream" || "$2" = "yes" ]]; then + BACKUP_STREAM="--stream" + else + BACKUP_STREAM="" + fi +fi + +if [ "$3" != "" ]; then + BACKUP_THREADS=$3 +fi + +cd $BACKUP_PATH + +COUNT_DIR=`ls -l $BACKUP_PATH | grep "^d" | wc -l` + +if [ "$COUNT_DIR" = "0" ]; then + # init new directory for backup + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR init -B $BACKUP_PATH -D $PGDATA" +fi + +if ! [ -d "$BACKUP_PATH/backups/$PG_MAJOR" ]; then + # create new instance for claster + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR add-instance -B $BACKUP_PATH --instance=$PG_MAJOR -D $PGDATA" + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR set-config -B $BACKUP_PATH --instance=$PG_MAJOR --retention-window=30 --compress-algorithm=zlib --compress-level=6" +fi + +IS_FULL=`su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --instance=$PG_MAJOR --backup-path=$BACKUP_PATH | grep FULL | grep 'OK\|DONE'"` + + +if ! 
[ -f $PGDATA/archive_active.trigger ] ; then
+    su - postgres -c "touch $PGDATA/archive_active.trigger"
+fi
+
+if [[ "$IS_FULL" = "" || $BACKUPMODE = "full" ]] ; then
+    # Full backup needs to be forcibly
+    su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS"
+else
+    # Backup type depends on day or input parameter
+    if [[ $BACKUPMODE = "merge" ]]; then
+        # в этом режиме здесь всегда PAGE
+        su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b page $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS"
+    else
+        su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b $BACKUPMODE $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS"
+    fi
+    STATUS=`su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --format=json | jq -c '.[].backups[0].status'"`
+    LAST_STATE=${STATUS//'"'/''}
+    if [[ "$LAST_STATE" = "CORRUPT" || "$LAST_STATE" = "ERROR" || "$LAST_STATE" = "ORPHAN" ]] ; then
+        # You need to run a full backup, as an error occurred with incremental
+        # Perhaps the loss of the segment at Failover ... 
+ su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR backup --backup-path=$BACKUP_PATH -b full $BACKUP_STREAM --instance=$PG_MAJOR -w --threads=$BACKUP_THREADS" + fi +fi + +if [[ $BACKUPMODE = "merge" ]] ; then + # объединяем старые бэкапы в соответствии с настройками + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --merge-expired --no-validate --threads=$BACKUP_THREADS" +else + # чистим старые бэкапы в соответствии с настройками + su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR delete --backup-path=$BACKUP_PATH --instance=$PG_MAJOR --delete-expired --delete-wal --threads=$BACKUP_THREADS" +fi + +# collecting statistics on backups +su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt" +su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --archive >> ~postgres/backups.txt" + +echo "" >> ~postgres/backups.txt +echo "Место на бэкапном устройстве:" >> ~postgres/backups.txt +df -h $BACKUP_PATH >> ~postgres/backups.txt + +ERRORS_COUNT=`su - postgres -c "grep -c ERROR ~postgres/backups.txt"` +EMAIL_SUBJECT="" +if [[ "$ERRORS_COUNT" -ne "0" ]] ; then + EMAIL_SUBJECT="Report backups error" +else + EMAIL_SUBJECT="Report backups" +fi + +cat ~postgres/backups.txt + +# send mail to DBA +if [ "$EMAIL_SEND" = "yes" ]; then + (echo 'List of all cluster backups:
' ; cat ~postgres/backups.txt ; echo '
';) | sendEmail -f "$EMAIL_HOSTNAME" -t $EMAILTO -s $EMAIL_SERVER -u $EMAIL_SUBJECT +fi diff --git a/16/docker-postgres/initdb-extension.sh b/16/docker-postgres/initdb-extension.sh new file mode 100644 index 0000000..0fbabed --- /dev/null +++ b/16/docker-postgres/initdb-extension.sh @@ -0,0 +1,95 @@ +#!/bin/sh + +set -e + +if [ "$POSTGRES_USER" = "" ]; then + POSTGRES_USER=postgres +fi +if [ "$POSTGRES_DB" = "" ]; then + POSTGRES_DB=postgres +fi +if [ "$DEV_SCHEMA" = "" ]; then + DEV_SCHEMA=dbo +fi +if [ "$EMAIL_SERVER" = "" ]; then + EMAIL_SERVER=mail.company.ru +fi +if [ "$ENV_DB_VALUE" = "" ]; then + ENV_DB_VALUE=DEV +fi + +# Perform all actions as $POSTGRES_USER +export PGUSER="$POSTGRES_USER" +export PGDATABASE="$POSTGRES_DB" + +POSTGIS_VERSION="${POSTGIS_VERSION%%+*}" + +echo "------------" +echo "-- initdb --" +echo "------------" + +# Check on the need to initialize the catalog of the FTS +COUNT_DIR=`ls -l /usr/share/postgresql/tsearch_data/ | wc -l` +if [ "$COUNT_DIR" = "1" ]; then + # init new directory for FTS + echo "# restore files on FTS folder ..." 
+ tar -xzf /usr/share/postgresql/$PG_MAJOR/tsearch_data.tar.gz -C /usr/share/postgresql/tsearch_data/ +fi + +# copy start configuration files to pgdata +if [ 'md5' != "$POSTGRES_HOST_AUTH_METHOD" ]; then + # create entrypoint for trust or another method access + sed "s/md5/$POSTGRES_HOST_AUTH_METHOD/g" /usr/local/bin/pg_hba.conf > $PGDATA/pg_hba.conf +else + # the default is password entry (md5) + cp -f /usr/local/bin/pg_hba.conf $PGDATA +fi +cp -f /usr/local/bin/pg_ident.conf $PGDATA +cp -f /usr/local/bin/postgresql.conf $PGDATA +if [ -n "$TZ" ]; then + # specifies a specific time zone for the server time zone + sed -i "s!timezone = 'UTC'!timezone = '$TZ'!g" $PGDATA/postgresql.conf + sed -i "s!cron.timezone = 'UTC'!cron.timezone = '$TZ'!g" $PGDATA/postgresql.conf +fi + +# specifies a specific Email server for sending letters +sed -i "s!adm.email_smtp_server = 'mail.company.ru'!adm.email_smtp_server = '$EMAIL_SERVER'!g" $PGDATA/postgresql.conf + +psql -c "select pg_reload_conf();" + +cd /usr/local/bin/ + +# Create the 'template_extension' template DB and application DB +if [ "$APP_DB" != "" ]; then + psql -f pre.sql -v POSTGRES_PASSWORD="$POSTGRES_PASSWORD" -v DEPLOY_PASSWORD="$DEPLOY_PASSWORD" -v PGBOUNCER_PASSWORD="$PGBOUNCER_PASSWORD" -v APP_DB="$APP_DB" -v APP_DB_PASSWORD="$APP_DB_PASSWORD" +else + psql -f pre.sql -v POSTGRES_PASSWORD="$POSTGRES_PASSWORD" -v DEPLOY_PASSWORD="$DEPLOY_PASSWORD" -v PGBOUNCER_PASSWORD="$PGBOUNCER_PASSWORD" +fi + +# Load extension into template_extension database and $POSTGRES_DB +for DB in "$POSTGRES_DB" template_extension "$APP_DB" ; do + if [ -n "$DB" ]; then + echo "Loading extensions into $DB" + + psql --dbname="$DB" -f db_all.sql -v email_server="$EMAIL_SERVER" + + if [ "$DB" = "postgres" ] ; then + psql --dbname="$DB" -f db_postgres.sql -v email_server="$EMAIL_SERVER" -v POSTGRES_PASSWORD="$POSTGRES_PASSWORD" -v DEPLOY_PASSWORD="$DEPLOY_PASSWORD" -v PGBOUNCER_PASSWORD="$PGBOUNCER_PASSWORD" + else + psql --dbname="$DB" -f 
db_notpostgres.sql -v IS_SETUPDB=false -v DEV_SCHEMA="$DEV_SCHEMA" -v POSTGRES_PASSWORD="$POSTGRES_PASSWORD" -v DEPLOY_PASSWORD="$DEPLOY_PASSWORD" -v PGBOUNCER_PASSWORD="$PGBOUNCER_PASSWORD" -v email_server="$EMAIL_SERVER" -v environment_db_value="$ENV_DB_VALUE" + if [ "$DB" != "template_extension" ] ; then + psql --dbname="$DB" -f db_target.sql -v DEV_SCHEMA="$DEV_SCHEMA" -v email_server="$EMAIL_SERVER" -v POSTGRES_PASSWORD="$POSTGRES_PASSWORD" -v DEPLOY_PASSWORD="$DEPLOY_PASSWORD" -v PGBOUNCER_PASSWORD="$PGBOUNCER_PASSWORD" + fi + if [ "$DB" = "$APP_DB" ] ; then + echo " Иннициализируем БД всеми скриптами из каталога /app_db_init_sql ..." + for file_sql in $(ls /app_db_init_sql/*.sql) ; do + echo "Файл: $file_sql" + psql --dbname="$DB" -f "$file_sql" + done + fi + fi + fi +done + +psql -XtqA -f post.sql | psql -v POSTGRES_PASSWORD="$POSTGRES_PASSWORD" -v DEPLOY_PASSWORD="$DEPLOY_PASSWORD" -v PGBOUNCER_PASSWORD="$PGBOUNCER_PASSWORD" +psql -f post_warning.sql diff --git a/16/docker-postgres/locales.conf b/16/docker-postgres/locales.conf new file mode 100644 index 0000000..463a6c6 --- /dev/null +++ b/16/docker-postgres/locales.conf @@ -0,0 +1,7 @@ +[default] + date format = %Y-%m-%d %H:%M:%S.%z + +[en_US] + date format = %Y-%m-%d %H:%M:%S.%z + language = us_english + charset = iso_1 diff --git a/16/docker-postgres/pg_hba.conf b/16/docker-postgres/pg_hba.conf new file mode 100644 index 0000000..bd5f6c0 --- /dev/null +++ b/16/docker-postgres/pg_hba.conf @@ -0,0 +1,102 @@ +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. 
Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) +# +# The first field is the connection type: "local" is a Unix-domain +# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, +# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a +# non-SSL TCP/IP socket. Similarly, "hostgssenc" uses a +# GSSAPI-encrypted TCP/IP socket, while "hostnogssenc" uses a +# non-GSSAPI socket. +# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, or a comma-separated list thereof. The "all" +# keyword does not match "replication". Access to replication +# must be enabled in a separate record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", or a +# comma-separated list thereof. In both the DATABASE and USER fields +# you can also write a file name prefixed with "@" to include names +# from a separate file. +# +# ADDRESS specifies the set of hosts the record matches. It can be a +# host name, or it is made up of an IP address and a CIDR mask that is +# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that +# specifies the number of significant bits in the mask. A host name +# that starts with a dot (.) matches a suffix of the actual host name. +# Alternatively, you can write an IP address and netmask in separate +# columns to specify the set of hosts. Instead of a CIDR-address, you +# can write "samehost" to match any of the server's own IP addresses, +# or "samenet" to match any address in any subnet that the server is +# directly connected to. 
+# +# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", +# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert". +# Note that "password" sends passwords in clear text; "md5" or +# "scram-sha-256" are preferred since they send encrypted passwords. +# +# OPTIONS are a set of options for the authentication in the format +# NAME=VALUE. The available options depend on the different +# authentication methods -- refer to the "Client Authentication" +# section in the documentation for a list of which options are +# available for which authentication methods. +# +# Database and user names containing spaces, commas, quotes and other +# special characters must be quoted. Quoting one of the keywords +# "all", "sameuser", "samerole" or "replication" makes the name lose +# its special character, and just match a database or username with +# that name. +# +# This file is read on server startup and when the server receives a +# SIGHUP signal. If you edit the file on a running system, you have to +# SIGHUP the server for the changes to take effect, run "pg_ctl reload", +# or execute "SELECT pg_reload_conf()". +# +# Put your actual configuration here +# ---------------------------------- +# +# If you want to allow non-local connections, you need to add more +# "host" records. In that case you will also need to make PostgreSQL +# listen on a non-local interface via the listen_addresses +# configuration parameter, or via the -i or -h command line switches. + +# CAUTION: Configuring the system for local "trust" authentication +# allows any local user to connect as any PostgreSQL user, including +# the database superuser. If you do not trust all your local users, +# use another authentication method. 
+ + +# TYPE DATABASE USER ADDRESS METHOD + +# "local" is for Unix domain socket connections only +local all all peer + +# IPv4 local connections: +host all all 127.0.0.1/32 trust +host all all localhost trust + +# IPv6 local connections: +host all all ::1/128 trust + +# Allow replication connections from localhost, by a user with the +# replication privilege. +local replication all trust +host replication all all trust + +host all all all md5 diff --git a/16/docker-postgres/pg_ident.conf b/16/docker-postgres/pg_ident.conf new file mode 100644 index 0000000..2a21033 --- /dev/null +++ b/16/docker-postgres/pg_ident.conf @@ -0,0 +1,43 @@ +# PostgreSQL User Name Maps +# ========================= +# +# Refer to the PostgreSQL documentation, chapter "Client +# Authentication" for a complete description. A short synopsis +# follows. +# +# This file controls PostgreSQL user name mapping. It maps external +# user names to their corresponding PostgreSQL user names. Records +# are of the form: +# +# MAPNAME SYSTEM-USERNAME PG-USERNAME +# +# (The uppercase quantities must be replaced by actual values.) +# +# MAPNAME is the (otherwise freely chosen) map name that was used in +# pg_hba.conf. SYSTEM-USERNAME is the detected user name of the +# client. PG-USERNAME is the requested PostgreSQL user name. The +# existence of a record specifies that SYSTEM-USERNAME may connect as +# PG-USERNAME. +# +# If SYSTEM-USERNAME starts with a slash (/), it will be treated as a +# regular expression. Optionally this can contain a capture (a +# parenthesized subexpression). The substring matching the capture +# will be substituted for \1 (backslash-one) if present in +# PG-USERNAME. +# +# Multiple maps may be specified in this file and used by pg_hba.conf. +# +# No map names are defined in the default configuration. If all +# system user names and PostgreSQL user names are the same, you don't +# need anything in this file. 
+# +# This file is read on server startup and when the postmaster receives +# a SIGHUP signal. If you edit the file on a running system, you have +# to SIGHUP the postmaster for the changes to take effect. You can +# use "pg_ctl reload" to do that. + +# Put your actual configuration here +# ---------------------------------- + +# MAPNAME SYSTEM-USERNAME PG-USERNAME +main postgres backup \ No newline at end of file diff --git a/16/docker-postgres/postgres b/16/docker-postgres/postgres new file mode 100644 index 0000000..b7355c2 --- /dev/null +++ b/16/docker-postgres/postgres @@ -0,0 +1,9 @@ +#!/bin/bash + +# this is start file for postgres + +# Check on the need to initialize the catalog of the FTS +tar -xzkf /usr/share/postgresql/$PG_MAJOR/tsearch_data.tar.gz -C /usr/share/postgresql/tsearch_data/ > /dev/null 2>&1 + +# start postgres process ... +exec /usr/lib/postgresql/$PG_MAJOR/bin/postgres ${@} diff --git a/16/docker-postgres/postgresql.conf b/16/docker-postgres/postgresql.conf new file mode 100644 index 0000000..e1e427f --- /dev/null +++ b/16/docker-postgres/postgresql.conf @@ -0,0 +1,861 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". 
Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: B = bytes Time units: us = microseconds +# kB = kilobytes ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. 
+#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' # what IP address(es) to listen on; + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +port = 5432 # (change requires restart) +max_connections = 140 # (change requires restart) +#reserved_connections = 0 # (change requires restart) +superuser_reserved_connections = 5 # (change requires restart) +unix_socket_directories = '/var/run/postgresql, /tmp' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP settings - +# see "man tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default +#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds; + # 0 selects the system default + +#client_connection_check_interval = 0 # time between checks for client + # disconnection while running queries; + # 0 for never + +# - Authentication - + +#authentication_timeout = 1min # 1s-600s +password_encryption = md5 # scram-sha-256 or md5 +#scram_iterations = 4096 +#db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab' +#krb_caseins_users = off +#gss_accept_delegation = off + +# - SSL - + +#ssl = on 
+#ssl_ca_file = '/var/lib/pgsql/certs/kscl.crt' +#ssl_cert_file = '/var/lib/pgsql/certs/psql.crt' +#ssl_crl_file = '' +#ssl_crl_dir = '' +#ssl_key_file = '/var/lib/pgsql/certs/psql.key' +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +#ssl_prefer_server_ciphers = on +#ssl_ecdh_curve = 'prime256v1' +#ssl_min_protocol_version = 'TLSv1.2' +#ssl_max_protocol_version = '' +#ssl_dh_params_file = '' +#ssl_passphrase_command = '' +#ssl_passphrase_command_supports_reload = off + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 256MB # min 128kB + # (change requires restart) +huge_pages = try # on, off, or try + # (change requires restart) +#huge_page_size = 0 # zero for system default + # (change requires restart) +temp_buffers = 32MB # min 800kB +max_prepared_transactions = 200 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
+work_mem = 20MB # min 64kB +#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem +maintenance_work_mem = 256MB # min 1MB +autovacuum_work_mem = 128MB # min 1MB, or -1 to use maintenance_work_mem +logical_decoding_work_mem = 256MB # min 64kB +#max_stack_depth = 2MB # min 100kB +#shared_memory_type = mmap # the default is the first option + # supported by the operating system: + # mmap + # sysv + # windows + # (change requires restart) +dynamic_shared_memory_type = posix # the default is usually the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # (change requires restart) +min_dynamic_shared_memory = 128MB # (change requires restart) +#vacuum_buffer_usage_limit = 256kB # size of vacuum and analyze buffer access strategy ring; + # 0 to disable vacuum buffer access strategy; + # range 128kB to 16GB + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kilobytes, or -1 for no limit + +# - Kernel Resources - + +max_files_per_process = 4096 # min 64 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +vacuum_cost_delay = 1 # 0-100 milliseconds (0 disables) +#vacuum_cost_page_hit = 1 # 0-10000 credits +vacuum_cost_page_miss = 2 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +vacuum_cost_limit = 10000 # 1-10000 credits + +# - Background Writer - + +bgwriter_delay = 10ms # 10-10000ms between rounds +bgwriter_lru_maxpages = 1600 # max buffers written/round, 0 disables +bgwriter_lru_multiplier = 10.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#backend_flush_after = 0 # measured in pages, 0 disables +effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching +maintenance_io_concurrency = 50 # 1-1000; 0 disables prefetching +max_worker_processes = 18 # (change requires restart) +max_parallel_workers_per_gather = 2 # taken from max_parallel_workers 
+max_parallel_maintenance_workers = 2 # taken from max_parallel_workers +max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel operations +parallel_leader_participation = on +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) + + +#------------------------------------------------------------------------------ +# WRITE-AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +wal_level = replica # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +synchronous_commit = remote_apply # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +full_page_writes = on # recover from partial page writes +wal_log_hints = on # also do full page writes of non-critical updates + # (change requires restart) +wal_compression = on # enables compression of full-page writes; + # off, pglz, lz4, zstd, or on +#wal_init_zero = on # zero-fill new WAL files +#wal_recycle = on # recycle WAL files +wal_buffers = 64MB # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables +#wal_skip_threshold = 2MB + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +checkpoint_timeout = 60min # range 30s-1d +checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables +max_wal_size = 20GB +min_wal_size = 
160MB + +# - Prefetching during recovery - + +#recovery_prefetch = try # prefetch pages referenced in the WAL? +#wal_decode_buffer_size = 512kB # lookahead window used for prefetching + # (change requires restart) + +# - Archiving - + +archive_mode = on # enables archiving; off, on, or always + # (change requires restart) +#archive_library = '' # library to use to archive a logfile segment + # (empty string indicates archive_command should + # be used) +archive_command = 'if [ -f archive_pause.trigger ]; then exit 1; else if [ -f archive_active.trigger ]; then pg_probackup-16 archive-push -B /mnt/pgbak --instance 16 --wal-file-path %p --wal-file-name %f -j 2 --batch-size=10; else exit 0; fi; fi' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +archive_timeout = 1800s # force a logfile segment switch after this + # number of seconds; 0 disables + +# - Archive Recovery - + +# These are only used in recovery mode. + +restore_command = 'if [ -f archive_active.trigger ]; then pg_probackup-16 archive-get -B /mnt/pgbak --instance 16 --wal-file-path %p --wal-file-name %f; else exit 0; fi' # command to use to restore an archived WAL file + # placeholders: %p = path of file to restore + # %f = file name only + # e.g. 'cp /mnt/server/archivedir/%f %p' +#archive_cleanup_command = '' # command to execute at every restartpoint +#recovery_end_command = '' # command to execute at completion of recovery + +# - Recovery Target - + +# Set these only when performing a targeted recovery. 
+ +#recovery_target = '' # 'immediate' to end recovery as soon as a + # consistent state is reached + # (change requires restart) +#recovery_target_name = '' # the named restore point to which recovery will proceed + # (change requires restart) +#recovery_target_time = '' # the time stamp up to which recovery will proceed + # (change requires restart) +#recovery_target_xid = '' # the transaction ID up to which recovery will proceed + # (change requires restart) +#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed + # (change requires restart) +#recovery_target_inclusive = on # Specifies whether to stop: + # just after the specified recovery target (on) + # just before the recovery target (off) + # (change requires restart) +#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID + # (change requires restart) +#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown' + # (change requires restart) + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the primary and on any standby that will send replication data. + +max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +max_replication_slots = 10 # max number of replication slots + # (change requires restart) +wal_keep_size = 4GB # in megabytes; 0 disables +#max_slot_wal_keep_size = -1 # in megabytes; -1 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Primary Server - + +# These settings are ignored on a standby server. 
+ +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all + +# - Standby Servers - + +# These settings are ignored on a primary server. + +#primary_conninfo = '' # connection string to sending server +#primary_slot_name = '' # replication slot on sending server +hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name + # is not set +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from primary + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt +#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery + +# - Subscribers - + +# These settings are ignored on a publisher. 
+ +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers +#max_parallel_apply_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_async_append = on +#enable_bitmapscan = on +#enable_gathermerge = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_incremental_sort = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_memoize = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_parallel_hash = on +#enable_partition_pruning = on +enable_partitionwise_join = on +enable_partitionwise_aggregate = on +#enable_presorted_aggregate = on +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +random_page_cost = 1.1 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +effective_cache_size = 512MB + +jit_above_cost = 800000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +jit_inline_above_cost = 900000 # inline small functions if query is + # more expensive than this; -1 disables +jit_optimize_above_cost = 900000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 
# range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +default_statistics_target = 200 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +from_collapse_limit = 6 +#jit = on # allow JIT compilation +join_collapse_limit = 6 # 1 disables collapsing of explicit + # JOIN clauses +#plan_cache_mode = auto # auto, force_generic_plan or + # force_custom_plan +#recursive_worktable_factor = 10.0 # range 0.001-1000000 + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, jsonlog, syslog, and + # eventlog, depending on platform. + # csvlog and jsonlog require + # logging_collector to be on. + +# This is used when logging to stderr: +logging_collector = on # Enable capturing of stderr, jsonlog, + # and csvlog into log files. Required + # to be on for csvlogs and jsonlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +log_directory = '/var/log/postgresql' # directory where log files are written, + # can be absolute or relative to PGDATA +log_filename = 'postgresql-16-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +log_rotation_size = 300MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. 
+log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (Windows): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +log_min_duration_statement = 1000 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + +#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements + # and their durations, > 0 logs only a sample of + # statements running at least this number + # of milliseconds; + # sample fraction is determined by log_statement_sample_rate + +#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding + # log_min_duration_sample to be logged; + # 1.0 logs all such statements, 0.0 never logs + + +#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements + # are logged regardless of their duration; 1.0 logs all + # statements from all transactions, 0.0 never logs + +#log_startup_progress_interval = 10s # Time between progress updates for + # long-running startup operations. 
+ # 0 disables the feature, > 0 indicates + # the interval in milliseconds. + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#debug_io_direct = off +#debug_logical_replication_streaming = buffered +#debug_parallel_query = off +log_autovacuum_min_duration = 5000 # log autovacuum activity; + # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. +log_checkpoints = on +#log_connections = on +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +log_line_prefix = '%m %p-%l app=%a,client=%r %q%u@%d, vxid:%v txid:%x %i ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %b = backend type + # %p = process ID + # %P = process ID of parallel group leader + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %Q = query ID (0 if none or not computed) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. 
'<%u%%%d> ' +log_lock_waits = on # log lock waits >= deadlock_timeout +#log_recovery_conflict_waits = off # log standby recovery conflict waits + # >= deadlock_timeout +#log_parameter_max_length = -1 # when logging statements, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +#log_parameter_max_length_on_error = 0 # when logging an error, limit logged + # bind-parameter values to N bytes; + # -1 means print in full, 0 disables +log_statement = 'ddl' # none, ddl, mod, all +#log_replication_commands = off +log_temp_files = 0 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'Europe/Moscow' + +# - Process Title - + +cluster_name = '16/data' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Cumulative Query and Index Statistics - + +track_activities = on +track_activity_query_size = 16384 # (change requires restart) +track_counts = on +track_io_timing = on +track_wal_io_timing = on +track_functions = all # none, pl, all +#stats_fetch_consistency = cache # cache, none, snapshot + + +# - Monitoring - + +#compute_query_id = auto +#log_statement_stats = off +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. 
+autovacuum_max_workers = 10 # max number of autovacuum subprocesses + # (change requires restart) +autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts + # before vacuum; -1 disables insert + # vacuums +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +autovacuum_vacuum_scale_factor = 0.02 # fraction of table size before vacuum +autovacuum_vacuum_insert_scale_factor = 0.02 # fraction of inserts over table + # size before insert vacuum +autovacuum_analyze_scale_factor = 0.01 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +search_path = ' dbo , public' # schema names +#row_security = on +#default_table_access_method = 'heap' +#default_tablespace = '' # a tablespace name, '' uses the default +default_toast_compression = 'lz4' # 'pglz' or 'lz4' +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' 
+#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#idle_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_table_age = 150000000 +#vacuum_freeze_min_age = 50000000 +#vacuum_failsafe_age = 1600000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_failsafe_age = 1600000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +xmloption = 'document' +#gin_pending_list_limit = 4MB +#createrole_self_grant = '' # set and/or inherit + +# - Locale and Formatting - + +datestyle = 'iso, dmy' +#intervalstyle = 'postgres' +timezone = 'UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 1 # min -15, max 3; any value >0 actually + # selects precise output mode +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. 
+lc_messages = 'en_US.UTF-8' # locale for system error message + # strings +lc_monetary = 'en_US.UTF-8' # locale for monetary formatting +lc_numeric = 'en_US.UTF-8' # locale for number formatting +lc_time = 'en_US.UTF-8' # locale for time formatting + +#icu_validation_level = warning # report ICU locale validation + # errors at the given level + +# default configuration for text search +default_text_search_config = 'pg_catalog.russian' + +# - Shared Library Preloading - + +#local_preload_libraries = '' +#session_preload_libraries = '' +shared_preload_libraries = 'plugin_debugger,plpgsql_check,pg_stat_statements,auto_explain,pg_buffercache,pg_cron,shared_ispell,pg_prewarm' # (change requires restart) + +#jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#gin_fuzzy_search_limit = 0 + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +max_locks_per_transaction = 512 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + 
+#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) +#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. + +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' 
# include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here +pg_stat_statements.max = 10000 +pg_stat_statements.track = all +pg_stat_statements.track_utility = false +pg_stat_statements.track_planning = false +pg_stat_statements.save = true + +auto_explain.log_min_duration = '15s' +auto_explain.log_nested_statements = true +auto_explain.log_analyze = true +auto_explain.log_verbose = true +auto_explain.log_buffers = true +auto_explain.log_timing = true +auto_explain.log_triggers = true +auto_explain.log_format = 'text' +auto_explain.log_parameter_max_length = -1 + +cron.database_name = 'postgres' +cron.use_background_workers = off +cron.timezone = 'Europe/Moscow' +#cron.max_running_jobs = 32 # Maximum number of jobs that can run concurrently + +# config of the shared memory +shared_ispell.max_size = 70MB + +# config on restore shared blocks +pg_prewarm.autoprewarm = false +pg_prewarm.autoprewarm_interval = 300 + +# Email server for sending letters +adm.email_smtp_server = 'mail.company.ru' + +# проверка кода по умолчанию отключена (включаем только на DEV/TEST) +plpgsql_check.mode = disabled +plpgsql_check.fatal_errors = yes +plpgsql_check.show_nonperformance_warnings = true +plpgsql_check.show_performance_warnings = true +plpgsql_check.profiler = off diff --git a/16/docker-postgres/show.sh b/16/docker-postgres/show.sh new file mode 100644 index 0000000..2c2d4ed --- /dev/null +++ b/16/docker-postgres/show.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# $1 - yes/no (the sign for send to email, yes - default) +# $2 - list of email recipients (separated by a space) + +if [ "$EMAILTO" = "" ]; then + EMAILTO="DBA-PostgreSQL@company.ru" +fi + +if [ "$EMAIL_SERVER" = "" ]; then + EMAIL_SERVER=mail.company.ru +fi + +if [ "$EMAIL_HOSTNAME" = "" ]; then + EMAIL_HOSTNAME=`hostname` + 
EMAIL_HOSTNAME="noreplay@${EMAIL_HOSTNAME}.ru" +fi + +if [ "$EMAIL_SEND" = "" ]; then + EMAIL_SEND="yes" +fi + +if [ "$BACKUP_PATH" = "" ]; then + BACKUP_PATH="/mnt/pgbak" +fi + +if [ "$1" != "" ]; then + EMAIL_SEND=$1 +fi + +if [ "$2" != "" ]; then + EMAILTO="$2" +fi + +cd $BACKUP_PATH + +# send mail to DBA +su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH > ~postgres/backups.txt" +su - postgres -c "/usr/bin/pg_probackup-$PG_MAJOR show --backup-path=$BACKUP_PATH --archive >> ~postgres/backups.txt" + +echo "" >> ~postgres/backups.txt +echo "Место на бэкапном устройстве:" >> ~postgres/backups.txt +df -h $BACKUP_PATH >> ~postgres/backups.txt + +ERRORS_COUNT=`su - postgres -c "grep -c ERROR ~postgres/backups.txt"` +EMAIL_SUBJECT="" +if [[ "$ERRORS_COUNT" -ne "0" ]] ; then + EMAIL_SUBJECT="Report backups error" +else + EMAIL_SUBJECT="Report backups" +fi + +cat ~postgres/backups.txt +if [ "$EMAIL_SEND" = "yes" ]; then + (echo 'List of all cluster backups:
' ; cat ~postgres/backups.txt ; echo '
';) | sendEmail -f "$EMAIL_HOSTNAME" -t $EMAILTO -s $EMAIL_SERVER -u $EMAIL_SUBJECT +fi diff --git a/16/docker-postgres/sql/background_start.sql b/16/docker-postgres/sql/background_start.sql new file mode 100644 index 0000000..1e5b425 --- /dev/null +++ b/16/docker-postgres/sql/background_start.sql @@ -0,0 +1,12 @@ +CREATE OR REPLACE FUNCTION util.background_start(p_command text) RETURNS void + LANGUAGE plpgsql + AS $$ +/* + Запускает указанную команду отдельным фоновым процессом без ожидания возврата результата +*/ +declare v_pid integer = pg_background_launch(p_command); +begin + perform pg_sleep(0.1); + perform pg_background_detach(v_pid); +end; +$$; \ No newline at end of file diff --git a/16/docker-postgres/sql/db_all.sql b/16/docker-postgres/sql/db_all.sql new file mode 100644 index 0000000..92d1176 --- /dev/null +++ b/16/docker-postgres/sql/db_all.sql @@ -0,0 +1,68 @@ +-- +-- код для всех БД +-- +SET default_transaction_read_only = off; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; + +select current_database() as dbconnect \gset + +-- создаём объекты для мониторинга +CREATE EXTENSION IF NOT EXISTS plpython3u; +-- Upgrade pg_stat_statements +CREATE EXTENSION IF NOT EXISTS pg_stat_statements SCHEMA public; +ALTER EXTENSION pg_stat_statements UPDATE; +-- +CREATE EXTENSION IF NOT EXISTS pg_background SCHEMA public; +ALTER EXTENSION pg_background UPDATE; +-- +CREATE SCHEMA IF NOT EXISTS util; +COMMENT ON SCHEMA util IS 'Схема для хранения различных функций и представлений общего назначения'; +-- +GRANT USAGE ON SCHEMA util TO execution_group; +GRANT USAGE ON SCHEMA util TO read_procedure_group; +GRANT USAGE ON SCHEMA util TO readonly_group; +GRANT USAGE ON SCHEMA util TO write_group; +-- +CREATE SCHEMA IF NOT EXISTS pgbouncer; +COMMENT ON SCHEMA pgbouncer IS 'Схема для хранения функций пула коннектов'; +GRANT CONNECT ON DATABASE :"dbconnect" TO pgbouncer; +GRANT USAGE ON SCHEMA pgbouncer TO pgbouncer; + +\i user_lookup.sql + +GRANT EXECUTE 
ON FUNCTION pgbouncer.user_lookup(text) TO pgbouncer; +REVOKE EXECUTE ON FUNCTION pgbouncer.user_lookup(text) FROM public; +REVOKE EXECUTE ON FUNCTION pgbouncer.user_lookup(text) FROM execution_group; +-- +\i replace_char_xml.sql +-- +select current_setting('server_version_num')::integer >= 130000 as postgres_pgvers_13plus \gset +select current_setting('server_version_num')::integer >= 140000 as postgres_pgvers_14plus \gset +-- +\if :postgres_pgvers_13plus + \i vw_who_13plus.sql + -- + \if :postgres_pgvers_14plus + \i vw_who_tree_14plus.sql + \else + \i vw_who_tree.sql + \endif +\else + \i vw_who.sql + \i vw_who_tree.sql +\endif +-- +\i vw_locks.sql +-- +\i vw_partitions.sql +-- +\i send_email.sql +-- +\if :postgres_pgvers_13plus + \i inf_long_running_requests_13plus.sql +\else + \i inf_long_running_requests.sql +\endif + +\i background_start.sql diff --git a/16/docker-postgres/sql/db_notpostgres.sql b/16/docker-postgres/sql/db_notpostgres.sql new file mode 100644 index 0000000..ed18712 --- /dev/null +++ b/16/docker-postgres/sql/db_notpostgres.sql @@ -0,0 +1,289 @@ +-- +-- код для всех БД кроме postgres +-- +SET default_transaction_read_only = off; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; + +select current_database() as dbconnect \gset +-- ckeck exists roles +select rolname as role_write_group from pg_roles where rolname = 'write_group' limit 1 \gset +select rolname as role_deploy from pg_roles where rolname ilike '%deploy%' limit 1 \gset +select rolname as role_execution_group from pg_roles where rolname = 'execution_group' limit 1 \gset + +-- ========================================================================== -- + +-- Upgrade pg_dbo_timestamp; +CREATE EXTENSION IF NOT EXISTS pg_dbo_timestamp SCHEMA public; +ALTER EXTENSION pg_dbo_timestamp UPDATE; + +ALTER EVENT TRIGGER dbots_tg_on_ddl_event DISABLE; +ALTER EVENT TRIGGER dbots_tg_on_drop_event DISABLE; + +\if :IS_SETUPDB + -- Upgrade PostGIS (includes raster) + CREATE EXTENSION 
IF NOT EXISTS postgis SCHEMA public; + ALTER EXTENSION postgis UPDATE; + + -- Upgrade Topology + CREATE EXTENSION IF NOT EXISTS postgis_topology; + ALTER EXTENSION postgis_topology UPDATE; + + -- Install Tiger dependencies in case not already installed + CREATE EXTENSION IF NOT EXISTS fuzzystrmatch SCHEMA public; + + -- Upgrade US Tiger Geocoder + CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder; + ALTER EXTENSION postgis_tiger_geocoder UPDATE; +\else + -- Install PostGIS (includes raster) + CREATE EXTENSION IF NOT EXISTS postgis SCHEMA public; + + -- Install Topology + CREATE EXTENSION IF NOT EXISTS postgis_topology; + + -- Install Tiger dependencies in case not already installed + CREATE EXTENSION IF NOT EXISTS fuzzystrmatch SCHEMA public; + + -- Install US Tiger Geocoder + CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder; +\endif + +-- Upgrade citext +CREATE EXTENSION IF NOT EXISTS citext SCHEMA public; +ALTER EXTENSION citext UPDATE; + +-- Upgrade uuid-ossp +CREATE EXTENSION IF NOT EXISTS "uuid-ossp" SCHEMA public; +ALTER EXTENSION "uuid-ossp" UPDATE; + +-- Upgrade adminpack; +CREATE EXTENSION IF NOT EXISTS adminpack; +ALTER EXTENSION adminpack UPDATE; + +-- Upgrade dblink +CREATE EXTENSION IF NOT EXISTS dblink SCHEMA public; +ALTER EXTENSION dblink UPDATE; + +-- Upgrade pageinspect +CREATE EXTENSION IF NOT EXISTS pageinspect SCHEMA public; +ALTER EXTENSION pageinspect UPDATE; + +-- Upgrade pg_buffercache +CREATE EXTENSION IF NOT EXISTS pg_buffercache SCHEMA public; +ALTER EXTENSION pg_buffercache UPDATE; + +-- Upgrade pg_prewarm +CREATE EXTENSION IF NOT EXISTS pg_prewarm SCHEMA public; +ALTER EXTENSION pg_prewarm UPDATE; + +-- Upgrade pg_stat_statements +CREATE EXTENSION IF NOT EXISTS pg_stat_statements SCHEMA public; +ALTER EXTENSION pg_stat_statements UPDATE; + +-- Upgrade pg_trgm +CREATE EXTENSION IF NOT EXISTS pg_trgm SCHEMA public; +ALTER EXTENSION pg_trgm UPDATE; + +-- Upgrade pgstattuple +CREATE EXTENSION IF NOT EXISTS pgstattuple SCHEMA 
public; +ALTER EXTENSION pgstattuple UPDATE; + +-- Upgrade postgres_fdw +CREATE EXTENSION IF NOT EXISTS postgres_fdw SCHEMA public; +ALTER EXTENSION postgres_fdw UPDATE; + +-- Upgrade file_fdw +CREATE EXTENSION IF NOT EXISTS file_fdw SCHEMA public; +ALTER EXTENSION file_fdw UPDATE; + +-- Upgrade amcheck +CREATE EXTENSION IF NOT EXISTS amcheck SCHEMA public; +ALTER EXTENSION amcheck UPDATE; + +-- Upgrade btree_gin +CREATE EXTENSION IF NOT EXISTS btree_gin SCHEMA public; +ALTER EXTENSION btree_gin UPDATE; + +-- Upgrade pldbgapi +CREATE EXTENSION IF NOT EXISTS pldbgapi SCHEMA public; +ALTER EXTENSION pldbgapi UPDATE; + +-- Upgrade pg_variables; +CREATE EXTENSION IF NOT EXISTS pg_variables SCHEMA public; +ALTER EXTENSION pg_variables UPDATE; + +-- Upgrade rum +CREATE EXTENSION IF NOT EXISTS rum SCHEMA public; +ALTER EXTENSION rum UPDATE; + +-- Upgrade hunspell_en_us +CREATE EXTENSION IF NOT EXISTS hunspell_en_us SCHEMA public; +ALTER EXTENSION hunspell_en_us UPDATE; + +-- Upgrade hunspell_ru_ru +CREATE EXTENSION IF NOT EXISTS hunspell_ru_ru SCHEMA public; +ALTER EXTENSION hunspell_ru_ru UPDATE; + +-- Upgrade hunspell_ru_ru_aou +CREATE EXTENSION IF NOT EXISTS hunspell_ru_ru_aot SCHEMA public; +ALTER EXTENSION hunspell_ru_ru_aot UPDATE; + +-- Upgrade shared_ispell; +select setting ~ 'shared_ispell' as is_shared_ispell_loaded from pg_settings where name ~ 'shared_preload_libraries' \gset +\if :is_shared_ispell_loaded + CREATE EXTENSION IF NOT EXISTS shared_ispell SCHEMA public; + ALTER EXTENSION shared_ispell UPDATE; +\endif + +-- Upgrade plpython3u; +CREATE EXTENSION IF NOT EXISTS plpython3u; +ALTER EXTENSION plpython3u UPDATE; + +-- Upgrade pg_tsparser +CREATE EXTENSION IF NOT EXISTS pg_tsparser SCHEMA public; +ALTER EXTENSION pg_tsparser UPDATE; + +-- Upgrade pg_repack +DROP EXTENSION IF EXISTS pg_repack; +CREATE EXTENSION IF NOT EXISTS pg_repack SCHEMA public; + +-- Upgrade plpgsql_check +DROP EXTENSION IF EXISTS plpgsql_check; +CREATE EXTENSION IF NOT EXISTS 
plpgsql_check SCHEMA public; + +-- ========================================================================== -- + +\i init_db_2_dblink_fdw.sql + +-- ========================================================================== -- + +GRANT SELECT ON public.dbots_object_timestamps TO write_group; +GRANT SELECT ON public.dbots_object_timestamps TO readonly_group; +GRANT SELECT ON public.dbots_object_timestamps TO :"role_deploy"; +GRANT SELECT ON public.dbots_event_data TO readonly_group; +GRANT SELECT, INSERT, UPDATE, DELETE ON public.dbots_event_data TO write_group; +GRANT SELECT, INSERT, UPDATE, DELETE, TRUNCATE ON public.dbots_event_data TO :"role_deploy"; +GRANT SELECT ON public.dbots_event_data TO readonly_group; + +-- ========================================================================== -- + +\i init_db_3_cron.sql + +-- ========================================================================== -- + +\if :is_shared_ispell_loaded + + \i init_db_4_fts.sql + +\endif + +GRANT CONNECT, CREATE ON DATABASE :"dbconnect" TO :"role_deploy"; +GRANT CONNECT ON DATABASE :"dbconnect" TO readonly_group; +GRANT CONNECT ON DATABASE :"dbconnect" TO write_group; +GRANT CONNECT ON DATABASE :"dbconnect" TO execution_group; +GRANT CONNECT ON DATABASE :"dbconnect" TO read_procedure_group; +GRANT CONNECT ON DATABASE :"dbconnect" TO monitoring_group; + +-- ========================================================================= -- + +-- ==== привелегии по умолчанию ==== +ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO write_group; +ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO write_group; +ALTER DEFAULT PRIVILEGES GRANT ALL ON TYPES TO write_group; +ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO write_group; +-- +ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO readonly_group; +ALTER DEFAULT PRIVILEGES GRANT SELECT ON SEQUENCES TO readonly_group; +ALTER DEFAULT PRIVILEGES GRANT USAGE ON TYPES TO readonly_group; +ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO 
readonly_group; +-- +ALTER DEFAULT PRIVILEGES GRANT EXECUTE ON FUNCTIONS TO execution_group; +ALTER DEFAULT PRIVILEGES GRANT EXECUTE ON ROUTINES TO execution_group; +ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO read_procedure_group; +-- +ALTER DEFAULT PRIVILEGES FOR ROLE :"role_deploy" GRANT ALL ON TABLES TO write_group; +ALTER DEFAULT PRIVILEGES FOR ROLE :"role_deploy" GRANT ALL ON SEQUENCES TO write_group; +ALTER DEFAULT PRIVILEGES FOR ROLE :"role_deploy" GRANT ALL ON TYPES TO write_group; +ALTER DEFAULT PRIVILEGES FOR ROLE :"role_deploy" GRANT ALL ON SCHEMAS TO write_group; +-- +ALTER DEFAULT PRIVILEGES FOR ROLE :"role_deploy" GRANT SELECT ON TABLES TO readonly_group; +ALTER DEFAULT PRIVILEGES FOR ROLE :"role_deploy" GRANT SELECT ON SEQUENCES TO readonly_group; +ALTER DEFAULT PRIVILEGES FOR ROLE :"role_deploy" GRANT USAGE ON TYPES TO readonly_group; +ALTER DEFAULT PRIVILEGES FOR ROLE :"role_deploy" GRANT USAGE ON SCHEMAS TO readonly_group; +-- +ALTER DEFAULT PRIVILEGES FOR ROLE :"role_deploy" GRANT EXECUTE ON FUNCTIONS TO execution_group; +ALTER DEFAULT PRIVILEGES FOR ROLE :"role_deploy" GRANT EXECUTE ON ROUTINES TO execution_group; +ALTER DEFAULT PRIVILEGES FOR ROLE :"role_deploy" GRANT USAGE ON SCHEMAS TO read_procedure_group; +-- + +-- ==== права на схемы (временно для ORM) ==== + +-- создаем отдельную схему разработки +CREATE SCHEMA IF NOT EXISTS :"DEV_SCHEMA" ; +ALTER SCHEMA :"DEV_SCHEMA" OWNER TO :"role_deploy"; +COMMENT ON SCHEMA :"DEV_SCHEMA" IS 'developer base schema'; + +-- выдаём USAGE права на все схемы для используемых групповых ролей +select 'GRANT USAGE ON SCHEMA ' || quote_ident(nspname) || ' TO ' || quote_ident(r) || ';' +from pg_namespace, unnest(ARRAY['write_group', 'readonly_group', 'execution_group', 'read_procedure_group', :'role_deploy']) as r +where nspname not in ('pg_toast','repack','pgbouncer') +\gexec + +-- и даем полные права для роли деплоя +GRANT ALL ON SCHEMA :"DEV_SCHEMA" TO :"role_deploy"; +GRANT ALL ON SCHEMA public TO 
:"role_deploy"; + +-- не даём читать текст функций поле prosrc +REVOKE SELECT ON pg_catalog.pg_proc FROM execution_group; +-- но даём читать все столбцы кроме prosrc +SELECT 'GRANT SELECT(' || string_agg(attname, ',') || ') ON pg_catalog.pg_proc TO execution_group;' +FROM pg_catalog.pg_attribute a +WHERE attrelid = 'pg_catalog.pg_proc'::regclass AND NOT attisdropped AND attname NOT IN ('tableoid','cmax','xmax','cmin','xmin', 'ctid', 'prosrc') +\gexec + +-- роль для чтения исходных кодов имеет нужные права +GRANT SELECT ON TABLE pg_catalog.pg_proc TO read_procedure_group; +GRANT SELECT ON TABLE information_schema.routines TO read_procedure_group; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_get_functiondef(oid) TO read_procedure_group; + +-- права на получение статистических данных +GRANT USAGE ON SCHEMA pg_catalog to monitoring_group; +GRANT USAGE ON SCHEMA public TO monitoring_group; +GRANT EXECUTE ON FUNCTION public.pg_stat_statements_reset(oid, oid, bigint) TO monitoring_group; +GRANT SELECT ON TABLE pg_catalog.pg_proc TO monitoring_group; + +-- роль деплоя также имеет права по чтению кода +GRANT SELECT ON TABLE pg_catalog.pg_proc TO :"role_deploy"; +GRANT SELECT ON TABLE information_schema.routines TO :"role_deploy"; +GRANT EXECUTE ON FUNCTION pg_catalog.pg_get_functiondef(oid) TO :"role_deploy"; + +-- ========================================================================= -- + +ALTER EVENT TRIGGER dbots_tg_on_ddl_event ENABLE; +ALTER EVENT TRIGGER dbots_tg_on_drop_event ENABLE; + +-- ========================================================================= -- + +GRANT CONNECT ON DATABASE :"dbconnect" TO mamonsu; +GRANT USAGE ON SCHEMA pg_catalog TO mamonsu; +GRANT SELECT ON TABLE pg_proc TO mamonsu; +-- + +-- проверка существования переменной окружения в базе данных +with _configs as ( + select r.rolname, unnest(s.setconfig) as config + from pg_db_role_setting s + left join pg_roles r on r.oid=s.setrole + where s.setdatabase = (select oid from pg_database where 
datname=current_database()) +), +_get as ( + select rolname, split_part(config, '=', 1) as variable, replace(config, split_part(config, '=', 1) || '=', '') as value + from _configs +) +SELECT NOT EXISTS(select 1 from _get where variable = 'adm.environment') as is_environment_db +\gset +\if :is_environment_db + -- устанавливаем переменную окружения для БД только если в БД нет ещё такой настройки + ALTER DATABASE :"dbconnect" SET adm.environment = :'environment_db_value'; +\endif diff --git a/16/docker-postgres/sql/db_postgres.sql b/16/docker-postgres/sql/db_postgres.sql new file mode 100644 index 0000000..ded81fd --- /dev/null +++ b/16/docker-postgres/sql/db_postgres.sql @@ -0,0 +1,53 @@ +-- +-- код только для БД postgres +-- +SET default_transaction_read_only = off; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; + +select rolname as role_deploy from pg_roles where rolname ilike '%deploy%' limit 1 \gset + +GRANT USAGE ON SCHEMA pg_catalog to monitoring_group; +GRANT USAGE ON SCHEMA public TO monitoring_group; +GRANT EXECUTE ON FUNCTION public.pg_stat_statements_reset(oid, oid, bigint) TO monitoring_group; +GRANT SELECT ON TABLE pg_catalog.pg_proc TO monitoring_group; +-- +-- Upgrade or install pg_cron +select setting ~ 'pg_cron' as is_pg_cron_loaded from pg_settings where name ~ 'shared_preload_libraries' \gset +\if :is_pg_cron_loaded + CREATE EXTENSION IF NOT EXISTS pg_cron; + ALTER EXTENSION pg_cron UPDATE; + GRANT USAGE ON SCHEMA cron TO mamonsu; + -- + delete from cron.job where command ilike '%util.inf_long_running_requests()%'; + -- организуем контроль за долгими процедурами + select cron.schedule('long query JOB all DB', '*/5 * * * *', 'select util.inf_long_running_requests();'); + -- + CREATE OR REPLACE FUNCTION cron.get_job_run_details( + p_dbname text, + p_interval interval DEFAULT '1 day'::interval + ) RETURNS SETOF cron.job_run_details + LANGUAGE sql SECURITY DEFINER + AS $$ + select * from cron.job_run_details where start_time >= 
now()-p_interval and database=p_dbname; + $$; + COMMENT ON FUNCTION cron.get_job_run_details(text, interval) IS 'Returns the history of completed jobs for the specified database and the specified time period'; + -- + -- эта функция должна быть доступна для выполнения роли mamonsu + GRANT EXECUTE ON FUNCTION cron.get_job_run_details(text, interval) TO mamonsu; + -- + GRANT ALL ON SCHEMA cron TO :"role_deploy"; + GRANT ALL ON TABLE cron.job TO :"role_deploy"; + GRANT ALL ON SEQUENCE cron.jobid_seq TO :"role_deploy"; + GRANT ALL ON TABLE cron.job_run_details TO :"role_deploy"; + GRANT ALL ON SEQUENCE cron.runid_seq TO :"role_deploy"; + + GRANT EXECUTE ON FUNCTION cron.schedule(text, text) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.schedule(text, text, text) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.schedule_in_database(text, text, text, text, text, boolean) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.unschedule(text) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.unschedule(bigint) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.alter_job(bigint, text, text, text, text, boolean) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.get_job_run_details(text, interval) TO :"role_deploy"; +\endif +-- diff --git a/16/docker-postgres/sql/db_target.sql b/16/docker-postgres/sql/db_target.sql new file mode 100644 index 0000000..169f4b5 --- /dev/null +++ b/16/docker-postgres/sql/db_target.sql @@ -0,0 +1,14 @@ +-- +-- code only for the target (additionally specified) database +-- + +SET default_transaction_read_only = off; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; + +select current_database() as dbconnect \gset +select rolname as role_deploy from pg_roles where rolname ilike '%deploy%' limit 1 \gset + +ALTER DATABASE :"dbconnect" OWNER TO :"role_deploy"; +GRANT ALL ON DATABASE :"dbconnect" TO :"role_deploy"; +ALTER DATABASE :"dbconnect" SET search_path = :DEV_SCHEMA, public, tiger; diff --git 
a/16/docker-postgres/sql/first_db.sql b/16/docker-postgres/sql/first_db.sql new file mode 100644 index 0000000..496d4d9 --- /dev/null +++ b/16/docker-postgres/sql/first_db.sql @@ -0,0 +1 @@ +select datname from pg_database where not datistemplate and datname not in ('postgres','mamonsu') limit 1; diff --git a/16/docker-postgres/sql/inf_long_running_requests.sql b/16/docker-postgres/sql/inf_long_running_requests.sql new file mode 100644 index 0000000..873d52c --- /dev/null +++ b/16/docker-postgres/sql/inf_long_running_requests.sql @@ -0,0 +1,105 @@ +CREATE OR REPLACE FUNCTION util.inf_long_running_requests( + p_query_age interval = '00:02:00'::interval, + p_recipients_list text = 'dba-postgresql@company.ru;'::text +) RETURNS void +LANGUAGE plpgsql +AS $$ +-- select util.inf_long_running_requests(); +-- select util.inf_long_running_requests(p_query_age:='00:00:30'::interval); +-- select cron.schedule('*/5 * * * *','select util.inf_long_running_requests();') +declare + v_html text; + v_arrpids text[]; + v_arrquery bytea[]; + v_nn_master int; + v_nn_parallel int; +begin + if exists( select 1 from public.vw_who + where state <> 'idle' + and datname not in ('mamonsu', 'postgres') + and coalesce(ts_age, xact_age, query_age) > p_query_age + and position('vacuum' in lower(query))=0 + and position('start_replication slot' in lower(query))=0 + ) then + -- есть, что отправлять... 
+ -- читаем данные по длительным запросам + v_html = ( + select '' || string_agg( '' || coalesce(r.rn::text, ' ') || '' || + '' || coalesce(r.query_age, ' ') || '' || + '' || coalesce(r.pid::text, ' ') || '' || + '' || coalesce(r.blocked_by, ' ') || '' || + '' || coalesce(r.state, ' ') || '' || + '' || case when r.open_tran_count then '1' else ' ' end || '' || + '' || coalesce(util.replace_char_xml(r.query), ' ') || '' || + '' || coalesce(util.replace_char_xml(r.usename), ' ') || '' || + '' || coalesce(r.wait_info, ' ') || '' || + '' || coalesce(r.datname, ' ') || '' || + '' || coalesce(r.client_addr::text, ' ') || '' || + '' || coalesce(util.replace_char_xml(r.application_name), ' ') || '' || + '' || coalesce(r.backend_type, ' ') || '' + , '') + || '' + from ( + select case when position('.' in coalesce(ts_age, xact_age, query_age)::text) > 0 + then left(coalesce(ts_age, xact_age, query_age)::text, + position('.' in coalesce(ts_age, xact_age, query_age)::text)-1) + else coalesce(ts_age, xact_age, query_age)::text + end as query_age, + pid, + case when blocked_by::text = '{}' then '' else blocked_by::text end blocked_by, + state, + ts_age is not null as open_tran_count, + datname, + usename, + coalesce(wait_event_type || ' ', '') || coalesce('(' || wait_event || ')', '') as wait_info, + client_addr, + application_name, + backend_type, + left(query,255) as query, + row_number() over(order by query_age desc) as rn + from public.vw_who + where state <> 'idle' and datname not in ('mamonsu', 'postgres') + ) r + ); + -- собираем исходный код длительных запросов + select array_agg(pid || ' (' + || replace(case when position('.' in coalesce(ts_age, xact_age, query_age)::text) > 0 + then left(coalesce(ts_age, xact_age, query_age)::text, + position('.' 
in coalesce(ts_age, xact_age, query_age)::text)-1) + else coalesce(ts_age, xact_age, query_age)::text + end, ':', '-') + || ').sql') as pid, + array_agg(query::bytea) as query, + count(*) as cnt_master + into v_arrpids, v_arrquery, v_nn_master + from public.vw_who + where state <> 'idle' and datname not in ('mamonsu', 'postgres') + and coalesce(ts_age, xact_age, query_age) > p_query_age and position('vacuum' in lower(query))=0; + -- + select count(*) as cnt_parallel + into v_nn_parallel + from public.vw_who + where state <> 'idle' and datname not in ('mamonsu', 'postgres') + and coalesce(ts_age, xact_age, query_age) > p_query_age and position('vacuum' in lower(query))=0; + -- + v_html = ' +

Активность на сервере с долгими запросами

+

Всего найдено долгих запросов - ' || v_nn_master + coalesce(v_nn_parallel, 0) || ' из них:
+ основных - ' || v_nn_master || ', паралельных - ' || coalesce(v_nn_parallel, 0) || '

+ + + ' + || v_html || '
Ndurationpidblocked_bystateopen transql textuser namewait infodatabase nameclient addrprogram namebackend_type
'; + -- отправляем письмо + perform util.send_email(p_to := p_recipients_list, p_subject := 'Длительные запросы'::text, p_message := v_html, + p_attach_files_name := v_arrpids, p_attach_files_body := v_arrquery); + -- фиксируем в логе отправку письма + raise notice 'Письмо о длительных запросах отправлено'; + raise log 'util.inf_long_running_requests: Letter of lengthy requests sent'; + end if; +end +$$; diff --git a/16/docker-postgres/sql/inf_long_running_requests_13plus.sql b/16/docker-postgres/sql/inf_long_running_requests_13plus.sql new file mode 100644 index 0000000..ee5cfc0 --- /dev/null +++ b/16/docker-postgres/sql/inf_long_running_requests_13plus.sql @@ -0,0 +1,107 @@ +CREATE OR REPLACE FUNCTION util.inf_long_running_requests( + p_query_age interval = '00:02:00'::interval, + p_recipients_list text = 'dba-postgresql@company.ru;'::text +) RETURNS void +LANGUAGE plpgsql +AS $$ +-- select util.inf_long_running_requests(); +-- select util.inf_long_running_requests(p_query_age:='00:00:30'::interval); +-- select cron.schedule('*/5 * * * *','select util.inf_long_running_requests();') +declare + v_html text; + v_arrpids text[]; + v_arrquery bytea[]; + v_nn_master int; + v_nn_parallel int; +begin + if exists( select 1 from public.vw_who + where state <> 'idle' + and datname not in ('mamonsu', 'postgres') + and coalesce(ts_age, xact_age, query_age) > p_query_age + and position('vacuum' in lower(query))=0 + and position('start_replication slot' in lower(query))=0 + ) then + -- есть, что отправлять... 
+ -- читаем данные по длительным запросам + v_html = ( + select '' || string_agg( '' || coalesce(r.rn::text, ' ') || '' || + '' || coalesce(r.query_age, ' ') || '' || + '' || coalesce(r.pid::text, ' ') || '' || + '' || coalesce(r.leader_pid::text, ' ') || '' || + '' || coalesce(r.blocked_by, ' ') || '' || + '' || coalesce(r.state, ' ') || '' || + '' || case when r.open_tran_count then '1' else ' ' end || '' || + '' || coalesce(util.replace_char_xml(r.query), ' ') || '' || + '' || coalesce(util.replace_char_xml(r.usename), ' ') || '' || + '' || coalesce(r.wait_info, ' ') || '' || + '' || coalesce(r.datname, ' ') || '' || + '' || coalesce(r.client_addr::text, ' ') || '' || + '' || coalesce(util.replace_char_xml(r.application_name), ' ') || '' || + '' || coalesce(r.backend_type, ' ') || '' + , '') + || '' + from ( + select case when position('.' in coalesce(ts_age, xact_age, query_age)::text) > 0 + then left(coalesce(ts_age, xact_age, query_age)::text, + position('.' in coalesce(ts_age, xact_age, query_age)::text)-1) + else coalesce(ts_age, xact_age, query_age)::text + end as query_age, + pid, + leader_pid, + case when blocked_by::text = '{}' then '' else blocked_by::text end blocked_by, + state, + ts_age is not null as open_tran_count, + datname, + usename, + coalesce(wait_event_type || ' ', '') || coalesce('(' || wait_event || ')', '') as wait_info, + client_addr, + application_name, + backend_type, + left(query,255) as query, + row_number() over(order by query_age desc) as rn + from public.vw_who + where state <> 'idle' and datname not in ('mamonsu', 'postgres') + ) r + ); + -- собираем исходный код длительных запросов + select array_agg(pid || ' (' + || replace(case when position('.' in coalesce(ts_age, xact_age, query_age)::text) > 0 + then left(coalesce(ts_age, xact_age, query_age)::text, + position('.' 
in coalesce(ts_age, xact_age, query_age)::text)-1) + else coalesce(ts_age, xact_age, query_age)::text + end, ':', '-') + || ').sql') as pid, + array_agg(query::bytea) as query, + count(*) as cnt_master + into v_arrpids, v_arrquery, v_nn_master + from public.vw_who + where state <> 'idle' and datname not in ('mamonsu', 'postgres') and leader_pid is null + and coalesce(ts_age, xact_age, query_age) > p_query_age and position('vacuum' in lower(query))=0; + -- + select count(*) as cnt_parallel + into v_nn_parallel + from public.vw_who + where state <> 'idle' and datname not in ('mamonsu', 'postgres') and leader_pid is not null + and coalesce(ts_age, xact_age, query_age) > p_query_age and position('vacuum' in lower(query))=0; + -- + v_html = ' +

Активность на сервере с долгими запросами

+

Всего найдено долгих запросов - ' || v_nn_master + coalesce(v_nn_parallel, 0) || ' из них:
+ основных - ' || v_nn_master || ', параллельных - ' || coalesce(v_nn_parallel, 0) || '

+ + + ' + || v_html || '
Ndurationpidleader_pidblocked_bystateopen transql textuser namewait infodatabase nameclient addrprogram namebackend_type
'; + -- отправляем письмо + perform util.send_email(p_to := p_recipients_list, p_subject := 'Длительные запросы'::text, p_message := v_html, + p_attach_files_name := v_arrpids, p_attach_files_body := v_arrquery); + -- фиксируем в логе отправку письма + raise notice 'Письмо о длительных запросах отправлено'; + raise log 'util.inf_long_running_requests: Letter of lengthy requests sent'; + end if; +end +$$; diff --git a/16/docker-postgres/sql/init_db_2_dblink_fdw.sql b/16/docker-postgres/sql/init_db_2_dblink_fdw.sql new file mode 100644 index 0000000..379d7a7 --- /dev/null +++ b/16/docker-postgres/sql/init_db_2_dblink_fdw.sql @@ -0,0 +1,59 @@ +select current_user as userconnect, current_user='postgres' as is_user_postgres \gset +select rolname as role_deploy from pg_roles where rolname ilike '%deploy%' limit 1 \gset + +CREATE EXTENSION IF NOT EXISTS dblink SCHEMA public; +CREATE EXTENSION IF NOT EXISTS postgres_fdw SCHEMA public; + +-- DBLINK позволяет выполнять автономные запросы +-- DBLINK: в БД postgres +CREATE SERVER IF NOT EXISTS dblink_postgres + FOREIGN DATA WRAPPER dblink_fdw + OPTIONS (host 'localhost', port '5432', dbname 'postgres'); + +-- FDW поддерживает транзакции +-- FDW: в БД postgres +CREATE SERVER IF NOT EXISTS fdw_postgres + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (host 'localhost', port '5432', dbname 'postgres'); + +-- если задан POSTGRES_PASSWORD то создаём маппинги для пользователя postgres +\if :{?POSTGRES_PASSWORD} + -- для fdw_postgres + CREATE USER MAPPING IF NOT EXISTS FOR postgres + SERVER fdw_postgres + OPTIONS (user 'postgres', password :'POSTGRES_PASSWORD'); + -- для dblink_postgres + CREATE USER MAPPING IF NOT EXISTS FOR postgres + SERVER dblink_postgres + OPTIONS (user 'postgres', password :'POSTGRES_PASSWORD'); +\else + -- для fdw_postgres + CREATE USER MAPPING IF NOT EXISTS FOR postgres + SERVER fdw_postgres + OPTIONS (user 'postgres'); + -- для dblink_postgres + CREATE USER MAPPING IF NOT EXISTS FOR postgres + SERVER 
dblink_postgres + OPTIONS (user 'postgres'); +\endif + +-- если задан DEPLOY_PASSWORD то создаём маппинги для пользователя развертывания приложений +\if :{?DEPLOY_PASSWORD} + -- для fdw_postgres + CREATE USER MAPPING IF NOT EXISTS FOR :role_deploy + SERVER fdw_postgres + OPTIONS (user :'role_deploy', password :'DEPLOY_PASSWORD'); + -- для dblink_postgres + CREATE USER MAPPING IF NOT EXISTS FOR :role_deploy + SERVER dblink_postgres + OPTIONS (user :'role_deploy', password :'DEPLOY_PASSWORD'); +\else + -- для fdw_postgres + CREATE USER MAPPING IF NOT EXISTS FOR :role_deploy + SERVER fdw_postgres + OPTIONS (user :'role_deploy'); + -- для dblink_postgres + CREATE USER MAPPING IF NOT EXISTS FOR :role_deploy + SERVER dblink_postgres + OPTIONS (user :'role_deploy'); +\endif diff --git a/16/docker-postgres/sql/init_db_3_cron.sql b/16/docker-postgres/sql/init_db_3_cron.sql new file mode 100644 index 0000000..6dc8209 --- /dev/null +++ b/16/docker-postgres/sql/init_db_3_cron.sql @@ -0,0 +1,221 @@ +-- +-- PostgreSQL database cluster dump +-- + +-- Started on 2018-02-19 09:09:23 MSK + +SET default_transaction_read_only = off; + +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; + +select current_database() as dbconnect \gset + +-- +CREATE SCHEMA IF NOT EXISTS cron; +-- +CREATE FOREIGN TABLE IF NOT EXISTS cron.job +( + jobid bigint OPTIONS (column_name 'jobid') NOT NULL, + schedule text OPTIONS (column_name 'schedule') NOT NULL, + command text OPTIONS (column_name 'command') NOT NULL, + nodename text OPTIONS (column_name 'nodename') NOT NULL, + nodeport integer OPTIONS (column_name 'nodeport') NOT NULL, + "database" text OPTIONS (column_name 'database') NOT NULL, + username text OPTIONS (column_name 'username') NOT NULL, + active boolean OPTIONS (column_name 'active') NOT NULL, + jobname text OPTIONS (column_name 'jobname') +) + SERVER fdw_postgres + OPTIONS (schema_name 'cron', table_name 'job'); +-- +CREATE FOREIGN TABLE IF NOT EXISTS cron.job_run_details +( 
+ jobid bigint OPTIONS (column_name 'jobid'), + runid bigint OPTIONS (column_name 'runid') NOT NULL, + job_pid integer OPTIONS (column_name 'job_pid'), + "database" text OPTIONS (column_name 'database'), + username text OPTIONS (column_name 'username'), + command text OPTIONS (column_name 'command'), + status text OPTIONS (column_name 'status'), + return_message text OPTIONS (column_name 'return_message'), + start_time timestamp with time zone OPTIONS (column_name 'start_time'), + end_time timestamp with time zone OPTIONS (column_name 'end_time') +) + SERVER fdw_postgres + OPTIONS (schema_name 'cron', table_name 'job_run_details'); +-- +CREATE OR REPLACE FUNCTION cron.schedule(schedule text, command text) + RETURNS bigint AS +$$ +declare + v_jobid bigint; +begin + select jobid into v_jobid + from public.dblink('dblink_postgres', format('select * from cron.schedule_in_database(%L, %L, %L, %L, %L)', + 'JOB (DB ' || current_database() || ')', schedule, command, current_database(), current_user + ) + ) as (jobid bigint); + -- + update cron.job set jobname = null where jobid = v_jobid; + -- + return v_jobid; +end; +$$ + LANGUAGE plpgsql VOLATILE STRICT + COST 100; + +COMMENT ON FUNCTION cron.schedule(text, text) IS 'schedule a pg_cron job without job name'; + +-- + +CREATE OR REPLACE FUNCTION cron.schedule(job_name text, schedule text, command text) + RETURNS bigint AS +$$ + select jobid + from public.dblink('dblink_postgres', format('select * from cron.schedule_in_database(%L, %L, %L, %L, %L)', + job_name ||' (DB ' || current_database() || ')', schedule, command, current_database(), current_user + ) + ) as (jobid bigint); +$$ + LANGUAGE sql VOLATILE STRICT + COST 100; + +COMMENT ON FUNCTION cron.schedule(text, text, text) IS 'schedule a pg_cron job with job name'; +-- +CREATE OR REPLACE FUNCTION cron.schedule_in_database(job_name text, schedule text, command text, database text, username text, active boolean) RETURNS bigint + AS $$ +declare + v_jobid bigint; +begin + 
select jobid into v_jobid + from public.dblink('dblink_postgres', format('select * from cron.schedule_in_database(%L, %L, %L, %L, %L, %L)', + job_name, schedule, command, database, username, active + ) + ) as (jobid bigint); + -- + update cron.job + set database = schedule_in_database.database, + active = schedule_in_database.active + where jobid = v_jobid; + -- + return v_jobid; +end; +$$ + LANGUAGE plpgsql VOLATILE STRICT + COST 100; + +COMMENT ON FUNCTION cron.schedule_in_database(job_name text, schedule text, command text, database text, username text, active boolean) IS 'schedule a pg_cron job with full parameters'; +-- +CREATE OR REPLACE FUNCTION cron.unschedule(job_id bigint) + RETURNS boolean AS +$$ + with _del as ( + delete from cron.job where jobid = job_id + returning jobid + ) + select count(*)=1 from _del; +$$ + LANGUAGE sql VOLATILE STRICT + COST 100; + +COMMENT ON FUNCTION cron.unschedule(bigint) IS 'unschedule a pg_cron job as number job'; + +-- + +CREATE OR REPLACE FUNCTION cron.unschedule(job_name text) + RETURNS boolean AS +$$ + with _del as ( + delete from cron.job where jobname = job_name and username = current_user + returning jobid + ) + select count(*)=1 from _del; +$$ + LANGUAGE sql VOLATILE STRICT + COST 100; + +COMMENT ON FUNCTION cron.unschedule(text) IS 'unschedule a pg_cron job as job name'; +-- +CREATE OR REPLACE FUNCTION cron.alter_job( + job_id bigint, + schedule text = NULL::text, + command text = NULL::text, + "database" text = NULL::text, + username text = NULL::text, + active boolean = NULL::boolean, + job_name text = NULL::text +) RETURNS void + LANGUAGE sql + AS $$ + update cron.job set schedule = alter_job.schedule where jobid=alter_job.job_id and alter_job.schedule is not null; + update cron.job set command = alter_job.command where jobid=alter_job.job_id and alter_job.command is not null; + update cron.job set "database" = alter_job."database" where jobid=alter_job.job_id and alter_job."database" is not null; + update 
cron.job set username = alter_job.username where jobid=alter_job.job_id and alter_job.username is not null; + update cron.job set active = alter_job.active where jobid=alter_job.job_id and alter_job.active is not null; + update cron.job set jobname = alter_job.job_name where jobid=alter_job.job_id and alter_job.job_name is not null; +$$; + +COMMENT ON FUNCTION cron.alter_job(bigint, text, text, text, text, boolean, text) IS 'Alter the job identified by job_id. Any option left as NULL will not be modified.'; +-- +CREATE OR REPLACE FUNCTION cron.get_job_run_details( + p_dbname text, + p_interval interval DEFAULT '1 day'::interval +) RETURNS SETOF cron.job_run_details + LANGUAGE sql SECURITY DEFINER + AS $$ +select * from cron.job_run_details where start_time >= now()-p_interval and database=p_dbname; +$$; +COMMENT ON FUNCTION cron.get_job_run_details(text, interval) IS 'Returns the history of completed jobs for the specified database and the specified time period'; +-- +-- эта функция должна быть доступна для выполнения роли mamonsu +GRANT EXECUTE ON FUNCTION cron.get_job_run_details(text, interval) TO mamonsu; + + +-- ckeck exists roles +select rolname as role_deploy from pg_roles where rolname ilike '%deploy%' limit 1 \gset +select rolname as role_write_group from pg_roles where rolname = 'write_group' limit 1 \gset +select rolname as role_execution_group from pg_roles where rolname = 'execution_group' limit 1 \gset + +GRANT ALL ON SCHEMA cron TO postgres; +\if :{?role_deploy} + GRANT ALL ON SCHEMA cron TO :role_deploy; +\endif +\if :{?role_write_group} + GRANT USAGE ON SCHEMA cron TO write_group; + GRANT ALL ON TABLE cron.job TO write_group; + GRANT ALL ON TABLE cron.job_run_details TO write_group; + GRANT USAGE ON SCHEMA pg_catalog TO write_group; + GRANT EXECUTE ON FUNCTION cron.schedule(text, text) TO write_group; + GRANT EXECUTE ON FUNCTION cron.schedule(text, text, text) TO write_group; + GRANT EXECUTE ON FUNCTION cron.schedule_in_database(text, text, text, 
text, text, boolean) TO write_group; + GRANT EXECUTE ON FUNCTION cron.unschedule(bigint) TO write_group; + GRANT EXECUTE ON FUNCTION cron.unschedule(text) TO write_group; + GRANT EXECUTE ON FUNCTION cron.alter_job(bigint, text, text, text, text, boolean, text) TO write_group; + GRANT EXECUTE ON FUNCTION cron.get_job_run_details(text, interval) TO write_group; +\endif +-- +\if :{?role_deploy} + GRANT ALL ON SCHEMA cron TO :"role_deploy"; + GRANT ALL ON TABLE cron.job TO :"role_deploy"; + GRANT ALL ON TABLE cron.job_run_details TO :"role_deploy"; + GRANT USAGE ON SCHEMA pg_catalog TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.schedule(text, text) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.schedule(text, text, text) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.schedule_in_database(text, text, text, text, text, boolean) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.unschedule(bigint) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.unschedule(text) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.alter_job(bigint, text, text, text, text, boolean, text) TO :"role_deploy"; + GRANT EXECUTE ON FUNCTION cron.get_job_run_details(text, interval) TO :"role_deploy"; +\endif + +-- в 1-ю неделю месяца замораживаем идентификаторы транзакций, в остальные недели только собираем статистику +\if :{?IS_SETUPDB} + \if :IS_SETUPDB + select cron.schedule('vacuum JOB freeze', '0 0 1-7 1,6 */7', 'vacuum (freeze,analyze);'); -- 1-я неделя каждого полугодия + select cron.schedule('vacuum JOB', '0 0 8-31 * */7', 'vacuum (analyze);'); -- 2-4 неделя каждого месяца + \endif +\else + select cron.schedule('vacuum JOB freeze', '0 0 1-7 1,6 */7', 'vacuum (freeze,analyze);'); -- 1-я неделя каждого полугодия + select cron.schedule('vacuum JOB', '0 0 8-31 * */7', 'vacuum (analyze);'); -- 2-4 неделя каждого месяца +\endif diff --git a/16/docker-postgres/sql/init_db_4_fts.sql b/16/docker-postgres/sql/init_db_4_fts.sql new file mode 100644 index 0000000..8accfc5 --- 
/dev/null +++ b/16/docker-postgres/sql/init_db_4_fts.sql @@ -0,0 +1,234 @@ +-- ========================================================================== -- +DROP TEXT SEARCH CONFIGURATION IF EXISTS public.fts_snowball_en_ru_sw; + +DROP TEXT SEARCH CONFIGURATION IF EXISTS public.fts_hunspell_en_ru; +DROP TEXT SEARCH CONFIGURATION IF EXISTS public.fts_aot_en_ru; + +DROP TEXT SEARCH DICTIONARY IF EXISTS public.english_hunspell_shared; +DROP TEXT SEARCH DICTIONARY IF EXISTS public.russian_hunspell_shared; +DROP TEXT SEARCH DICTIONARY IF EXISTS public.russian_aot_shared; + +DROP TEXT SEARCH CONFIGURATION IF EXISTS public.fts_hunspell_en_ru_sw; +DROP TEXT SEARCH CONFIGURATION IF EXISTS public.fts_aot_en_ru_sw; + +DROP TEXT SEARCH DICTIONARY IF EXISTS public.english_hunspell_shared_sw; +DROP TEXT SEARCH DICTIONARY IF EXISTS public.russian_hunspell_shared_sw; +DROP TEXT SEARCH DICTIONARY IF EXISTS public.russian_aot_shared_sw; + +DROP TEXT SEARCH DICTIONARY IF EXISTS public.fts_simple; +-- ========================================================================== -- + +-- DICTIONARY without stopwords +CREATE TEXT SEARCH DICTIONARY public.english_hunspell_shared ( + TEMPLATE = public.shared_ispell, + dictfile = 'en_us', afffile = 'en_us' +); +COMMENT ON TEXT SEARCH DICTIONARY public.english_hunspell_shared IS 'FTS hunspell dictionary for english language (shared without stopwords)'; + +CREATE TEXT SEARCH DICTIONARY public.russian_hunspell_shared ( + TEMPLATE = public.shared_ispell, + dictfile = 'ru_ru', afffile = 'ru_ru' +); +COMMENT ON TEXT SEARCH DICTIONARY public.russian_hunspell_shared IS 'FTS hunspell Lebedev dictionary for russian language (shared without stopwords)'; + +CREATE TEXT SEARCH DICTIONARY public.russian_aot_shared ( + TEMPLATE = public.shared_ispell, + dictfile = 'ru_ru_aot', afffile = 'ru_ru_aot' +); +COMMENT ON TEXT SEARCH DICTIONARY public.russian_aot_shared IS 'FTS hunspell AOT dictionary for russian language (shared without stopwords)'; + +-- 
DICTIONARY with stopwords +CREATE TEXT SEARCH DICTIONARY public.english_hunspell_shared_sw ( + TEMPLATE = public.shared_ispell, + dictfile = 'en_us', afffile = 'en_us', stopwords = 'english' +); +COMMENT ON TEXT SEARCH DICTIONARY public.english_hunspell_shared_sw IS 'FTS hunspell dictionary for english language (shared with stopwords)'; + +CREATE TEXT SEARCH DICTIONARY public.russian_hunspell_shared_sw ( + TEMPLATE = public.shared_ispell, + dictfile = 'ru_ru', afffile = 'ru_ru', stopwords = 'russian' +); +COMMENT ON TEXT SEARCH DICTIONARY public.russian_hunspell_shared_sw IS 'FTS hunspell Lebedev dictionary for russian language (shared with stopwords)'; + +CREATE TEXT SEARCH DICTIONARY public.russian_aot_shared_sw ( + TEMPLATE = public.shared_ispell, + dictfile = 'ru_ru_aot', afffile = 'ru_ru_aot', stopwords = 'russian' +); +COMMENT ON TEXT SEARCH DICTIONARY public.russian_aot_shared_sw IS 'FTS hunspell AOT dictionary for russian language (shared with stopwords)'; + +-- ========================================================================== -- + +-- CONFIGURATION without stopwords +CREATE TEXT SEARCH CONFIGURATION public.fts_hunspell_en_ru (parser=public.tsparser); + +ALTER TEXT SEARCH CONFIGURATION public.fts_hunspell_en_ru + ADD MAPPING FOR email, file, float, host, hword_numpart, int, numhword, numword, sfloat, uint, url, url_path, version + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_hunspell_en_ru + ALTER MAPPING FOR asciiword, asciihword, hword_asciipart + WITH english_hunspell_shared, english_stem; + +ALTER TEXT SEARCH CONFIGURATION public.fts_hunspell_en_ru + ALTER MAPPING FOR hword, hword_part, word + WITH russian_hunspell_shared, russian_stem; + +COMMENT ON TEXT SEARCH CONFIGURATION public.fts_hunspell_en_ru IS 'FTS hunspell Lebedev configuration for russian language based on shared_ispell without stopwords'; + +-- ========================================================================== -- + +CREATE TEXT SEARCH CONFIGURATION 
public.fts_aot_en_ru (parser=public.tsparser); + +ALTER TEXT SEARCH CONFIGURATION public.fts_aot_en_ru + ADD MAPPING FOR email, file, float, host, hword_numpart, int, numhword, numword, sfloat, uint, url, url_path, version + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_aot_en_ru + ALTER MAPPING FOR asciiword, asciihword, hword_asciipart + WITH english_hunspell_shared, english_stem; + +ALTER TEXT SEARCH CONFIGURATION public.fts_aot_en_ru + ALTER MAPPING FOR hword, hword_part, word + WITH russian_aot_shared, russian_stem; + +COMMENT ON TEXT SEARCH CONFIGURATION public.fts_aot_en_ru IS 'FTS hunspell AOT configuration for russian language based on shared_ispell without stopwords'; + +-- ========================================================================== -- + +-- CONFIGURATION with stopwords +CREATE TEXT SEARCH CONFIGURATION public.fts_hunspell_en_ru_sw (parser=public.tsparser); + +ALTER TEXT SEARCH CONFIGURATION public.fts_hunspell_en_ru_sw + ADD MAPPING FOR email, file, float, host, hword_numpart, int, numhword, numword, sfloat, uint, url, url_path, version + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_hunspell_en_ru_sw + ALTER MAPPING FOR asciiword, asciihword, hword_asciipart + WITH english_hunspell_shared_sw, english_stem; + +ALTER TEXT SEARCH CONFIGURATION public.fts_hunspell_en_ru_sw + ALTER MAPPING FOR hword, hword_part, word + WITH russian_hunspell_shared_sw, russian_stem; + +COMMENT ON TEXT SEARCH CONFIGURATION public.fts_hunspell_en_ru_sw IS 'FTS hunspell Lebedev configuration for russian language based on shared_ispell with stopwords'; + +-- ========================================================================== -- + +CREATE TEXT SEARCH CONFIGURATION public.fts_aot_en_ru_sw (parser=public.tsparser); + +ALTER TEXT SEARCH CONFIGURATION public.fts_aot_en_ru_sw + ADD MAPPING FOR email, file, float, host, hword_numpart, int, numhword, numword, sfloat, uint, url, url_path, version + WITH simple; + +ALTER TEXT SEARCH 
CONFIGURATION public.fts_aot_en_ru_sw + ALTER MAPPING FOR asciiword, asciihword, hword_asciipart + WITH english_hunspell_shared_sw, english_stem; + +ALTER TEXT SEARCH CONFIGURATION public.fts_aot_en_ru_sw + ALTER MAPPING FOR hword, hword_part, word + WITH russian_aot_shared_sw, russian_stem; + +COMMENT ON TEXT SEARCH CONFIGURATION public.fts_aot_en_ru_sw IS 'FTS hunspell AOT configuration for russian language based on shared_ispell with stopwords'; + +-- ========================================================================== -- + +CREATE TEXT SEARCH CONFIGURATION public.fts_snowball_en_ru_sw (parser=public.tsparser); + +ALTER TEXT SEARCH CONFIGURATION public.fts_snowball_en_ru_sw + ADD MAPPING FOR email, file, float, host, hword_numpart, int, numhword, numword, sfloat, uint, url, url_path, version + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_snowball_en_ru_sw + ALTER MAPPING FOR asciiword, asciihword, hword_asciipart + WITH english_stem; + +ALTER TEXT SEARCH CONFIGURATION public.fts_snowball_en_ru_sw + ALTER MAPPING FOR hword, hword_part, word + WITH russian_stem; + +COMMENT ON TEXT SEARCH CONFIGURATION public.fts_snowball_en_ru_sw IS 'FTS snowball configuration for russian language based on tsparser with stopwords'; + +-- ========================================================================== -- + +CREATE TEXT SEARCH CONFIGURATION public.fts_simple ( + PARSER = public.tsparser ); + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR hword_part + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR hword + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR sfloat + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR numhword + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR uint + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR hword_numpart + WITH simple; + 
+ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR float + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR version + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR url + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR int + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR asciiword + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR hword_asciipart + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR file + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR asciihword + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR host + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR numword + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR word + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR email + WITH simple; + +ALTER TEXT SEARCH CONFIGURATION public.fts_simple + ADD MAPPING FOR url_path + WITH simple; + +COMMENT ON TEXT SEARCH CONFIGURATION public.fts_simple IS 'Simple configuration'; + +-- ========================================================================== -- diff --git a/16/docker-postgres/sql/post.sql b/16/docker-postgres/sql/post.sql new file mode 100644 index 0000000..a95d847 --- /dev/null +++ b/16/docker-postgres/sql/post.sql @@ -0,0 +1,68 @@ +SET default_transaction_read_only = off; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; + +-- ==== убирание public доступов ==== + +-- убираем все права для роли public +-- запрещаем кому бы то ни было создавать временные объекты для роли public +-- запрещаем кому бы то ни было подключаться к БД для роли public +select '\c '||datname||chr(13)||chr(10)||'REVOKE ALL ON DATABASE "' || datname || '" 
FROM public;' +from pg_database +where datname not in ('template1','template0'); + +-- запрещаем вновь создаваемым объектам получать любое право для роли public для таблиц +select '\c "'||datname||'"'||chr(13)||chr(10)||'ALTER DEFAULT PRIVILEGES REVOKE ALL PRIVILEGES ON TABLES FROM public;' +from pg_database +where datname not in ('template1','template0'); + +-- запрещаем вновь создаваемым объектам получать любое право для роли public для последовательностей +select '\c "'||datname||'"'||chr(13)||chr(10)||'ALTER DEFAULT PRIVILEGES REVOKE ALL PRIVILEGES ON SEQUENCES FROM public;' +from pg_database +where datname not in ('template1','template0'); + +-- запрещаем вновь создаваемым объектам получать любое право для роли public для типов +select '\c "'||datname||'"'||chr(13)||chr(10)||'ALTER DEFAULT PRIVILEGES REVOKE ALL PRIVILEGES ON TYPES FROM public;' +from pg_database +where datname not in ('template1','template0'); + +-- запрещаем вновь создаваемым объектам получать любое право для роли public на функции +select '\c "'||datname||'"'||chr(13)||chr(10)||'ALTER DEFAULT PRIVILEGES REVOKE ALL PRIVILEGES ON FUNCTIONS FROM public;' +from pg_database +where datname not in ('template1','template0'); + +-- запрещаем вновь создаваемым объектам получать любое право для роли public для таблиц +select '\c "'||datname||'"'||chr(13)||chr(10)||'ALTER DEFAULT PRIVILEGES FOR ROLE deploy REVOKE ALL PRIVILEGES ON TABLES FROM public;' +from pg_database +where datname not in ('template1','template0'); + +-- запрещаем вновь создаваемым объектам получать любое право для роли public для последовательностей +select '\c "'||datname||'"'||chr(13)||chr(10)||'ALTER DEFAULT PRIVILEGES FOR ROLE deploy REVOKE ALL PRIVILEGES ON SEQUENCES FROM public;' +from pg_database +where datname not in ('template1','template0'); + +-- запрещаем вновь создаваемым объектам получать любое право для роли public на функции +select '\c "'||datname||'"'||chr(13)||chr(10)||'ALTER DEFAULT PRIVILEGES FOR ROLE deploy 
REVOKE ALL PRIVILEGES ON FUNCTIONS FROM public;' +from pg_database +where datname not in ('template1','template0'); + +-- запрещаем вновь создаваемым объектам получать любое право для роли public для типов +select '\c "'||datname||'"'||chr(13)||chr(10)||'ALTER DEFAULT PRIVILEGES FOR ROLE deploy REVOKE ALL PRIVILEGES ON TYPES FROM public;' +from pg_database +where datname not in ('template1','template0'); + +-- Запрещаем кому бы то ни было читать код процедур для роли public +select '\c "'||datname||'"'||chr(13)||chr(10)||'REVOKE ALL PRIVILEGES ON pg_catalog.pg_proc, information_schema.routines FROM PUBLIC; +REVOKE ALL PRIVILEGES ON FUNCTION pg_catalog.pg_get_functiondef(oid) FROM PUBLIC;' +from pg_database +where datname not in ('template1','template0'); + +-- видеть код и структуру данных для роли public +select '\c "'||datname||'"'||chr(13)||chr(10)||'REVOKE ALL PRIVILEGES ON SCHEMA pg_catalog, information_schema, public FROM PUBLIC;' +from pg_database +where datname not in ('template1','template0'); + +-- разрешаем подключаться логинам мониторинга ко всем БД +select 'GRANT CONNECT ON DATABASE "'||datname||'" TO monitoring_group;' +from pg_database +where datname not in ('template1','template0'); diff --git a/16/docker-postgres/sql/post_warning.sql b/16/docker-postgres/sql/post_warning.sql new file mode 100644 index 0000000..0943fd9 --- /dev/null +++ b/16/docker-postgres/sql/post_warning.sql @@ -0,0 +1,13 @@ +SET default_transaction_read_only = off; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; + +select not (setting ~ 'shared_ispell') as is_shared_ispell_notloaded from pg_settings where name ~ 'shared_preload_libraries' \gset +\if :is_shared_ispell_notloaded + \echo "" + \echo "-- ================================================================================================================ --" + \echo "Please, after the 1st start of the container with an empty database directory, сorrect in the postgreSQL.conf file," + \echo "the 
'shared_preload_libraries' parameter it must include the download of the 'shared_ispell' library " + \echo "and re-run the script: update-extension.sh" + \echo "-- ================================================================================================================ --" +\endif diff --git a/16/docker-postgres/sql/pre.sql b/16/docker-postgres/sql/pre.sql new file mode 100644 index 0000000..291d816 --- /dev/null +++ b/16/docker-postgres/sql/pre.sql @@ -0,0 +1,129 @@ +SET default_transaction_read_only = off; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; + +-- create DB template_extension +select not exists(select true FROM pg_catalog.pg_database where datname='template_extension') as is_check +\gset +\if :is_check + CREATE DATABASE template_extension IS_TEMPLATE true; +\endif + +\if :{?APP_DB} + select not exists(select true FROM pg_catalog.pg_database where datname = :'APP_DB') as is_check + \gset + \if :is_check + CREATE DATABASE :"APP_DB"; + \endif + + -- роль для приложения + select not exists(select true FROM pg_catalog.pg_roles where rolname=:'APP_DB') as is_check + \gset + \if :is_check + CREATE ROLE :"APP_DB" ; + SET log_statement='none'; + ALTER ROLE :"APP_DB" WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB LOGIN NOREPLICATION NOBYPASSRLS PASSWORD :'APP_DB_PASSWORD'; + SET log_statement='ddl'; + ALTER DATABASE :"APP_DB" OWNER TO :"APP_DB"; + \endif +\endif + +-- create role deploy +-- роль для деплоя, т.е. 
все объекты в БД должны быть созданы от нее, а не от пользователя postgres (sa) +select not exists(select true FROM pg_catalog.pg_roles where rolname ilike '%deploy%') as is_check +\gset +\if :is_check + CREATE ROLE deploy; + SET log_statement='none'; + ALTER ROLE deploy WITH NOSUPERUSER INHERIT CREATEROLE CREATEDB LOGIN NOREPLICATION NOBYPASSRLS PASSWORD :'DEPLOY_PASSWORD'; + GRANT pg_signal_backend TO deploy; + SET log_statement='ddl'; +\endif +select rolname as role_deploy from pg_roles where rolname ilike '%deploy%' limit 1 \gset + +-- create role replicator +select not exists(select true FROM pg_catalog.pg_roles where rolname='replicator') as is_check +\gset +\if :is_check + CREATE ROLE replicator; +\endif +ALTER ROLE replicator WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB LOGIN REPLICATION NOBYPASSRLS; +GRANT CONNECT ON DATABASE postgres TO replicator; -- с версии 2.1.3 patroni требуется право CONNECT для выполнения проверок кластера + +-- create group readonly_group +select not exists(select true FROM pg_catalog.pg_roles where rolname='readonly_group') as is_check +\gset +\if :is_check + CREATE ROLE readonly_group; +\endif +ALTER ROLE readonly_group WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN NOREPLICATION NOBYPASSRLS; + +-- create group write_group +select not exists(select true FROM pg_catalog.pg_roles where rolname='write_group') as is_check +\gset +\if :is_check + CREATE ROLE write_group; +\endif +ALTER ROLE write_group WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN NOREPLICATION NOBYPASSRLS; + +-- create group execution_group +select not exists(select true FROM pg_catalog.pg_roles where rolname='execution_group') as is_check +\gset +\if :is_check + CREATE ROLE execution_group; +\endif +ALTER ROLE execution_group WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN NOREPLICATION NOBYPASSRLS; + +-- create group read_procedure_group +select not exists(select true FROM pg_catalog.pg_roles where rolname='read_procedure_group') 
as is_check +\gset +\if :is_check + CREATE ROLE read_procedure_group; +\endif +ALTER ROLE read_procedure_group WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN NOREPLICATION NOBYPASSRLS; + +-- added role mamonsu +select not exists(select * from pg_roles where rolname = 'mamonsu') as is_check +\gset +\if :is_check + CREATE ROLE mamonsu LOGIN NOSUPERUSER INHERIT NOCREATEDB NOCREATEROLE NOREPLICATION; +\endif + +-- группа мониторинга +select not exists(select true FROM pg_catalog.pg_roles where rolname='monitoring_group') as is_check +\gset +\if :is_check + CREATE ROLE monitoring_group; +\endif +ALTER ROLE monitoring_group WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN NOREPLICATION NOBYPASSRLS; +GRANT pg_monitor TO monitoring_group; + +-- роль для коннекта +select not exists(select true FROM pg_catalog.pg_roles where rolname='pgbouncer') as is_check +\gset +\if :is_check + -- пользователь pgbouncer должен иметь только md5 аутентификацию + CREATE ROLE pgbouncer; + SET log_statement='none'; + select setting as pswd_enc from pg_settings where name = 'password_encryption' \gset + SET password_encryption = 'md5'; + ALTER ROLE pgbouncer WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB LOGIN NOREPLICATION NOBYPASSRLS PASSWORD :'PGBOUNCER_PASSWORD'; + set password_encryption = :'pswd_enc'; + SET log_statement='ddl'; +\endif + +-- added rights +GRANT CONNECT ON DATABASE postgres TO :"role_deploy"; +GRANT CONNECT ON DATABASE postgres TO readonly_group; +GRANT CONNECT ON DATABASE postgres TO write_group; +GRANT CONNECT ON DATABASE postgres TO execution_group; +GRANT CONNECT ON DATABASE postgres TO read_procedure_group; +GRANT CONNECT ON DATABASE postgres TO monitoring_group; +GRANT ALL PRIVILEGES ON TABLESPACE pg_global TO monitoring_group; + +-- на пока даём права как для ORM роли приложения +\if :{?APP_DB} + GRANT write_group TO :"APP_DB" ; + GRANT execution_group TO :"APP_DB" ; + GRANT readonly_group TO :"APP_DB" ; +\endif diff --git 
a/16/docker-postgres/sql/replace_char_xml.sql b/16/docker-postgres/sql/replace_char_xml.sql new file mode 100644 index 0000000..5ea5d58 --- /dev/null +++ b/16/docker-postgres/sql/replace_char_xml.sql @@ -0,0 +1,5 @@ +CREATE OR REPLACE FUNCTION util.replace_char_xml(p_str2xml text) RETURNS text + LANGUAGE sql IMMUTABLE PARALLEL SAFE COST 10.0 + AS $$ + select replace(replace(replace(replace(replace(p_str2xml,'&','&amp;'),'''','&apos;'),'"','&quot;'),'<','&lt;'),'>','&gt;'); +$$; diff --git a/16/docker-postgres/sql/send_email.sql b/16/docker-postgres/sql/send_email.sql new file mode 100644 index 0000000..b944abc --- /dev/null +++ b/16/docker-postgres/sql/send_email.sql @@ -0,0 +1,95 @@ +CREATE OR REPLACE FUNCTION util.send_email( + p_to text, + p_subject text, + p_message text, + p_copy text = NULL::text, + p_blindcopy text = NULL::text, + p_attach_files_name text[] = NULL::text[], + p_attach_files_body bytea[] = NULL::bytea[], + p_attach_files_codec text[] = NULL::text[], + p_sender_address text = NULL::text, + p_smtp_server text = COALESCE(NULLIF(current_setting('adm.email_smtp_server'::text, true), ''::text), 'mail.company.ru'::text) + ) RETURNS text + LANGUAGE plpython3u + AS $$ +# -*- coding: utf-8 -*- + +# +# Отправка сообщений через функцию в базе данных.
+# список адресатов в p_to p_copy p_blindcopy указывается через , или через ; +# +# select util.send_email('sergey.grinko@company.ru,grufos@mail.ru','Проверка заголовка','Текст письма','grufos@mail.ru','sergey.grinko@gmail.com'); +# select util.send_email('sergey.grinko@company.ru','Проверка заголовка','Текст письма','sergey.grinko@gmail.com','grufos@mail.ru'); +# select util.send_email('sergey.grinko@company.ru','Проверка заголовка','Текст письма', NULL, 'grufos@mail.ru'); +# select util.send_email('sergey.grinko@company.ru','Проверка заголовка','Текст письма', NULL, 'grufos@mail.ru,sergey.grinko@gmail.com'); +# select util.send_email('sergey.grinko@company.ru','Проверка заголовка','Текст письма', 'grufos@mail.ru'); +# select util.send_email('sergey.grinko@company.ru','Проверка заголовка','Текст письма', 'sergey.grinko@gmail.com'); +# select util.send_email('sergey.grinko@company.ru','Проверка заголовка','

Hi!
How are you?
Here is the link you wanted.

'); +# select util.send_email('sergey.grinko@company.ru','Проверка заголовка','

Hi!
How are you?
Here is the link you wanted.

'); +# select util.send_email('sergey.grinko@company.ru','Проверка заголовка','Текст письма', NULL, NULL, ARRAY['file.txt'], ARRAY['содержимое файла file.txt'::bytea]); +# select util.send_email('sergey.grinko@company.ru','Проверка заголовка','

Hi!
How are you?
Here is the link you wanted.

', NULL, NULL, ARRAY['file.txt'], ARRAY['содержимое файла file.txt'::bytea]); +# select util.send_email('sergey.grinko@company.ru','Проверка заголовка','

Hi!
How are you?
Here is the link you wanted.

', NULL, NULL, ARRAY['file.txt'], ARRAY['содержимое файла file.txt'::bytea], ARRAY['zip']); + + import os + import io + import zipfile + import socket + import smtplib + from smtplib import SMTPException + from email.mime.multipart import MIMEMultipart + from email.mime.text import MIMEText + from email.header import Header + from email.mime.base import MIMEBase + from email.mime.application import MIMEApplication + from email import encoders + from os.path import basename + + _sender_address = p_sender_address + if p_sender_address is None or p_sender_address == '' or p_sender_address.isspace(): + _sender_address = os.environ.get('EMAIL_HOSTNAME') + if _sender_address is None or _sender_address == '' or _sender_address.isspace(): + _sender_address = socket.gethostname() + " <" +socket.gethostname() + "@company.ru>" + + _recipients_list = [r.strip() for r in p_to.replace(';',',').split(',')] + + _msg = MIMEMultipart('alternative') + _msg['From'] = _sender_address + _msg['To'] = p_to.replace(';',',') + _msg['Subject'] = Header(p_subject, 'utf-8') + if not (p_copy is None or p_copy=='' or p_copy.isspace()): + _msg['CC'] = p_copy.replace(';',',') + _recipients_list = _recipients_list + [r.strip() for r in p_copy.replace(';',',').split(',')] + if not (p_blindcopy is None or p_blindcopy=='' or p_blindcopy.isspace()): + _msg['BCC'] = p_blindcopy.replace(';',',') + _recipients_list = _recipients_list + [r.strip() for r in p_blindcopy.replace(';',',').split(',')] + + if p_message: + if "" in p_message or "
" in p_message or "