# pg_back.conf — sample configuration for pg_back (forked from orgrim/pg_back)
# pg_back configuration file
# PostgreSQL binaries path. Leave empty to search $PATH
bin_directory =
# Where to store the dumps and other files. It can include the
# {dbname} keyword that will be replaced by the name of the database
# being dumped.
backup_directory = /var/backups/postgresql
# Timestamp format to use in filenames of output files. Two values are
# possible: legacy and rfc3339. For example legacy is 2006-01-02_15-04-05, and
# rfc3339 is 2006-01-02T15:04:05-07:00. rfc3339 is the default, except on
# Windows where it is not possible to use the rfc3339 format in filenames. Thus
# the only format on Windows is legacy: the option has no effect on Windows.
# timestamp_format = rfc3339
# PostgreSQL connection options. These are the usual libpq
# variables. dbname is the database used to dump globals, acl,
# configuration and pause replication. password is better set in
# ~/.pgpass
host =
port =
user =
dbname =
# List of database names to dump. When left empty, dump all
# databases. See with_templates to dump templates too. Separator is
# comma.
include_dbs =
# List of database names not to dump. Separator is comma.
exclude_dbs =
# When set to true, database templates are also dumped, either
# explicitly if listed in the include_dbs list or implicitly if
# include_dbs is empty.
with_templates = false
# Format of the dump, understood by pg_dump. Possible values are
# plain, custom, tar or directory.
format = custom
# When the format is directory, number of parallel jobs used to dump
# (-j option of pg_dump)
parallel_backup_jobs = 1
# When using a compressed binary format, e.g. custom or directory, adjust the
# compression level between 0 and 9. Use -1 to keep the default level of pg_dump.
compress_level = -1
# Compute a checksum file for each dump that can be checked
# by the corresponding shaXsum -c command. Possible values are none to
# disable checksums, sha1, sha224, sha256, sha384, and sha512.
checksum_algorithm = none
# Encrypt the files produced, including globals and configuration.
encrypt = false
# Passphrase to use for encryption and decryption. The PGBK_CIPHER_PASS
# environment variable can be used alternatively.
cipher_pass =
# Keep original files after encrypting them.
encrypt_keep_source = false
# Purge dumps older than this number of days. If the interval has to
# be shorter than one day, use a duration with units, h for hours, m
# for minutes, s for seconds, us for microseconds or ns for
# nanoseconds, ex. 1h30m24s.
purge_older_than = 30
# When purging older dumps, always keep this minimum number of
# dumps. The default is 0. Even when purge_older_than is 0, the dumps
# of the current run are kept; to remove all dumps and keep nothing,
# for example to just test for data corruption, set purge_older_than
# to a negative duration.
purge_min_keep = 0
# Number of pg_dump commands to run concurrently
jobs = 1
# inject these options to pg_dump
pg_dump_options =
# When dumping from a hot standby server, wait for exclusive locks to
# be released within this number of seconds. Abort if exclusive locks
# are still held. If an exclusive lock is granted while replication is
# paused, the lock is held until replication is resumed, causing
# pg_dump to wait forever.
pause_timeout = 3600
# Commands to execute before dumping and after. The post-backup
# command is always executed even in case of failure.
pre_backup_hook =
post_backup_hook =
# Upload resulting files to a remote location. Possible values are: none,
# s3, sftp, gcs. The default is none, meaning no file will be uploaded.
upload = none
# Purge remote files. When uploading to a remote location, purge the remote
# files with the same rules as the local directory
# purge_remote = false
# AWS S3 Access information. Region and Bucket are mandatory. If no credential
# or profile is provided, defaults from aws sdk are used.
# s3_region =
# s3_bucket =
# s3_profile =
# s3_key_id =
# s3_secret =
# s3_endpoint =
# s3_force_path = false
# s3_tls = true
# SFTP Access information. If the user is empty, the current system user is
# used. Port defaults to 22. The password is also used as passphrase for any
# identity file given, it can be provided with the PGBK_SSH_PASS environment
# variable. PGBK_SSH_PASS is overridden by a value set here or on the command
# line. Use the directory to inform where to store files, it can be relative to
# the working directory of the SSH connection, the home directory of the remote
# user in most cases.
# sftp_host =
# sftp_port =
# sftp_user =
# sftp_password =
# sftp_directory =
# sftp_identity =
# sftp_ignore_hostkey = false
# Google Cloud Storage (GCS) Access information. Bucket is mandatory. If the
# path to the key file is empty, the GOOGLE_APPLICATION_CREDENTIALS environment
# variable is used.
# gcs_bucket =
# gcs_endpoint =
# gcs_keyfile =
# Azure Blob Storage access information. The container is mandatory. If the
# account name is left empty, an anonymous connection is used and the endpoint
# is used directly: this allows the use of a full URL to the container with a
# SAS token. When an account is provided, the URL is built by prepending the
# container name to the endpoint. The default endpoint is
# blob.core.windows.net. The AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_KEY are
# used when azure_account and azure_key are not set.
# azure_container =
# azure_endpoint =
# azure_account =
# azure_key =
# # Per database options. Use a ini section named the same as the
# # database. These options take precedence over the global values
# [dbname]
# format =
# parallel_backup_jobs =
# compress_level =
# checksum_algorithm =
# purge_older_than =
# purge_min_keep =
# # List of schemas and tables to dump or exclude from the dump.
# # Inclusion and exclusion rules of pg_dump apply, as well as
# # pattern rules. Separate schema/table names with a semicolon
# schemas =
# exclude_schemas =
# tables =
# exclude_tables =
# Include or exclude large objects in the dump. Leave the option commented to
# keep the default behaviour, see pg_dump -b.
# with_blobs = true
# # inject these options to pg_dump. Use an empty value to cancel the
# # global value of pg_dump_options
# pg_dump_options =