Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: enable pg_dump cron #22

Merged
merged 10 commits into from
Aug 2, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/lint-test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -46,5 +46,5 @@ jobs:

- name: Run helm unittest
run: |
helm plugin install https://github.com/helm-unittest/helm-unittest
helm plugin install https://github.com/helm-unittest/helm-unittest.git
helm unittest --debug --color charts/*
54 changes: 54 additions & 0 deletions charts/cnpg-cluster/templates/backup-cron.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
{{- if .Values.backup.enabled }}
# CronJob running a pg_dump via the s3-client image and shipping the dump
# to the barman object store under a "/dumps" suffix. Rendered only when
# backup.enabled is true.
apiVersion: batch/v1
kind: CronJob
metadata:
  labels:
    app: cnpg-backup-s3-client
  name: backup-cron
spec:
  # Quote the templated cron expression so the rendered manifest is an
  # unambiguous YAML string; falls back to backup.schedule, then daily.
  schedule: {{ or .Values.backup.sqlDumpSchedule .Values.backup.schedule "0 0 * * *" | quote }}
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        metadata:
          labels:
            app: cnpg-backup-s3-client
          name: backup-cron
        spec:
          securityContext:
            runAsUser: 1001
            runAsGroup: 1001
            fsGroup: 1001
          restartPolicy: Never
          containers:
            - name: s3-client
              image: ghcr.io/socialgouv/docker/s3-client:1
              imagePullPolicy: IfNotPresent
              securityContext:
                allowPrivilegeEscalation: false
              env:
                # S3 credentials are read from the same secrets configured
                # for the barman object store, under fixed bucket_* keys.
                - name: AWS_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: {{ .Values.backup.barmanObjectStore.s3Credentials.accessKeyId.name | quote }}
                      key: bucket_access_key
                - name: AWS_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: {{ .Values.backup.barmanObjectStore.s3Credentials.secretAccessKey.name | quote }}
                      key: bucket_secret_key
                - name: AWS_DEFAULT_REGION
                  valueFrom:
                    secretKeyRef:
                      name: {{ .Values.backup.barmanObjectStore.s3Credentials.region.name | quote }}
                      key: bucket_region
                # Quoted: URLs/paths may contain characters that are YAML
                # indicators when left as plain scalars.
                - name: AWS_ENDPOINT_URL
                  value: {{ .Values.backup.barmanObjectStore.endpointURL | quote }}
                # Dumps land next to the WAL/base backups, under /dumps.
                - name: DESTINATION_PATH
                  value: "{{ trimSuffix "/" .Values.backup.barmanObjectStore.destinationPath }}/dumps"
              envFrom:
                # Secret holding PG connection credentials for pg_dump
                # (see backup.sqlDumpPgSecret in values.yaml).
                - secretRef:
                    name: {{ .Values.backup.sqlDumpPgSecret | quote }}
{{- end }}
4 changes: 2 additions & 2 deletions charts/cnpg-cluster/templates/cluster.cnpg.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ spec:
bootstrap:
{{- if .Values.recovery.enabled }}
recovery:
source: "{{ or .Values.recovery.externalClusterName "cnpg-cluster" }}"
source: "recovery-cluster"
{{- if .Values.recovery.targetTime }}
recoveryTarget:
targetTime: "{{ .Values.recovery.targetTime }}"
Expand Down Expand Up @@ -110,7 +110,7 @@ spec:

{{- if .Values.recovery.enabled }}
externalClusters:
- name: "{{ or .Values.recovery.externalClusterName "cnpg-cluster" }}"
- name: "recovery-cluster"
barmanObjectStore:
{{- toYaml .Values.recovery.barmanObjectStore | nindent 8 }}
{{- end }}
Expand Down
72 changes: 62 additions & 10 deletions charts/cnpg-cluster/tests/__snapshot__/cnpg-cluster_test.yaml.snap
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,52 @@ cluster with custom pgparams:
work_mem: 512MB
cluster with enabled backup and recovery:
1: |
concurrencyPolicy: Forbid
jobTemplate:
spec:
backoffLimit: 0
template:
metadata:
labels:
app: cnpg-backup-s3-client
name: backup-cron
spec:
containers:
- env:
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: bucket_access_key
name: minio
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: bucket_secret_key
name: minio
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
key: bucket_region
name: minio
- name: AWS_ENDPOINT_URL
value: http://minio:9000
- name: DESTINATION_PATH
value: s3://backups/dumps
envFrom:
- secretRef:
name: pg-user-app
image: ghcr.io/socialgouv/docker/s3-client:1
imagePullPolicy: IfNotPresent
name: s3-client
securityContext:
allowPrivilegeEscalation: false
restartPolicy: Never
securityContext:
fsGroup: 1001
runAsGroup: 1001
runAsUser: 1001
schedule: 4 5 * * 0
2: |
backup:
barmanObjectStore:
destinationPath: s3://backups/
Expand All @@ -13,14 +59,18 @@ cluster with enabled backup and recovery:
accessKeyId:
key: ACCESS_KEY_ID
name: minio
region:
key: DEFAULT_REGION
name: minio
secretAccessKey:
key: ACCESS_SECRET_KEY
name: minio
serverName: some-cluster
retentionPolicy: 30d
bootstrap:
recovery:
source: my-cluster-name-backup
recoveryTarget:
targetTime: 2020-11-26 15:22:00.00000+00
source: recovery-cluster
externalClusters:
- barmanObjectStore:
destinationPath: s3://backups/
Expand All @@ -32,8 +82,8 @@ cluster with enabled backup and recovery:
secretAccessKey:
key: ACCESS_SECRET_KEY
name: minio
serverName: some-cluster-to-recover
name: my-cluster-name-backup
serverName: my-cluster-to-restore
name: recovery-cluster
imageName: ghcr.io/cloudnative-pg/postgis:15
imagePullPolicy: IfNotPresent
instances: 1
Expand All @@ -45,18 +95,18 @@ cluster with enabled backup and recovery:
parameters: null
storage:
size: 8Gi
2: |
3: |
backupOwnerReference: self
cluster:
name: RELEASE-NAME-cnpg-cluster
schedule: 0 0 0 * * 0
schedule: 1 2 3 * * 0
cluster with recovery enabled:
1: |
bootstrap:
recovery:
recoveryTarget:
targetTime: 2020-11-26 15:22:00.00000+00
source: my-cluster-name-backup
source: recovery-cluster
externalClusters:
- barmanObjectStore:
destinationPath: s3://backups/
Expand All @@ -68,8 +118,8 @@ cluster with recovery enabled:
secretAccessKey:
key: ACCESS_SECRET_KEY
name: minio
serverName: recoveredCluster
name: my-cluster-name-backup
serverName: my-cluster-to-restore
name: recovery-cluster
imageName: ghcr.io/cloudnative-pg/postgis:15
imagePullPolicy: IfNotPresent
instances: 1
Expand All @@ -91,10 +141,12 @@ cluster with scheduled backup enabled:
accessKeyId:
key: ACCESS_KEY_ID
name: minio
region:
key: DEFAULT_REGION
name: minio
secretAccessKey:
key: ACCESS_SECRET_KEY
name: minio
serverName: some-cluster
retentionPolicy: 30d
bootstrap:
initdb:
Expand Down
31 changes: 25 additions & 6 deletions charts/cnpg-cluster/tests/cnpg-cluster_test.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
suite: test cnpg-cluster
templates:
- cluster.cnpg.yaml
- backup-cron.yaml
- scheduledbackup.cnpg.yaml
tests:
- it: cluster should render
Expand All @@ -11,9 +12,16 @@ tests:
- template: cluster.cnpg.yaml
hasDocuments:
count: 1
- template: backup-cron.yaml
hasDocuments:
count: 0
# waiting for release of https://github.com/helm-unittest/helm-unittest/commit/0ace2cc039c1fa33133ea1f26e7cae620443d42a
# - containsDocument:
# kind: CronJob
# not: true
- it: cluster with custom instances
values:
- values-instances.yaml
- ./values/instances.yaml
asserts:
- template: cluster.cnpg.yaml
isKind:
Expand All @@ -24,7 +32,7 @@ tests:
value: 2
- it: cluster with custom image tag
values:
- values-tag.yaml
- ./values/tag.yaml
asserts:
- template: cluster.cnpg.yaml
isKind:
Expand All @@ -35,7 +43,7 @@ tests:
value: ghcr.io/cloudnative-pg/postgis:12
- it: cluster with scheduled backup enabled
values:
- values-backup.yaml
- ./values/backup.yaml
asserts:
- template: cluster.cnpg.yaml
matchSnapshot:
Expand All @@ -44,22 +52,33 @@ tests:
equal:
path: spec.schedule
value: "1 2 3 * * 0"
- template: backup-cron.yaml
equal:
path: spec.schedule
value: "4 5 * * 0"
- template: backup-cron.yaml
hasDocuments:
count: 1
- it: cluster with recovery enabled
values:
- values-recovery.yaml
- ./values/recovery.yaml
asserts:
- template: cluster.cnpg.yaml
matchSnapshot:
path: spec
- it: cluster with enabled backup and recovery
values:
- values-backup-recovery.yaml
- ./values/recovery.yaml
- ./values/backup.yaml
asserts:
- matchSnapshot:
path: spec
- template: backup-cron.yaml
hasDocuments:
count: 1
- it: cluster with custom pgparams
values:
- values-pgparams.yaml
- ./values/pgparams.yaml
asserts:
- template: cluster.cnpg.yaml
matchSnapshot:
Expand Down
30 changes: 0 additions & 30 deletions charts/cnpg-cluster/tests/values-backup-recovery.yaml

This file was deleted.

Original file line number Diff line number Diff line change
@@ -1,15 +1,19 @@
# Test values enabling scheduled barman backups plus the pg_dump cron.
backup:
  enabled: true
  # CNPG schedule format: seconds field first (6 fields).
  schedule: "1 2 3 * * 0"
  # Standard 5-field cron for the pg_dump CronJob.
  sqlDumpSchedule: "4 5 * * 0"
  # Secret providing PG credentials consumed by pg_dump via envFrom.
  sqlDumpPgSecret: pg-user-app
  barmanObjectStore:
    destinationPath: s3://backups/
    endpointURL: http://minio:9000
    serverName: "some-cluster"
    s3Credentials:
      accessKeyId:
        name: minio
        key: ACCESS_KEY_ID
      secretAccessKey:
        name: minio
        key: ACCESS_SECRET_KEY
      region:
        name: minio
        key: DEFAULT_REGION
  retentionPolicy: "30d"
Original file line number Diff line number Diff line change
@@ -1,11 +1,10 @@
recovery:
enabled: true
externalClusterName: my-cluster-name-backup
targetTime: "2020-11-26 15:22:00.00000+00"
barmanObjectStore:
destinationPath: s3://backups/
endpointURL: http://minio:9000
serverName: "recoveredCluster"
serverName: my-cluster-to-restore
s3Credentials:
accessKeyId:
name: minio
Expand Down
10 changes: 9 additions & 1 deletion charts/cnpg-cluster/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,12 @@ backup:
# this cron format has the seconds on the left
schedule: "0 0 0 * * 0"

# -- Schedule the SQL dump backups, for instance every Sunday
sqlDumpSchedule: "0 0 * * 0"

# -- Secret where pg_dump will look for DB credentials
sqlDumpPgSecret:

# -- RetentionPolicy is the retention policy to be used for backups and WALs (i.e. '60d').
# The retention policy is expressed in the form of XXu where XX is a positive integer and
# u is in [dwm] - days, weeks, months.
Expand Down Expand Up @@ -134,12 +140,12 @@ monitoring:
enablePodMonitor: false

superuserSecretName:
dbSecretName:

recovery:
enabled: false

# -- Name of the source cluster in the backups
externalClusterName:

# -- Time to restore from, in RFC3339 format https://datatracker.ietf.org/doc/html/rfc3339
# targetTime: "2020-11-26 15:22:00.00000+00"
Expand All @@ -149,6 +155,8 @@ recovery:
barmanObjectStore:
# destinationPath:
# endpointURL:
# name of the recovery server on the s3 backups
# serverName:
# s3Credentials:
# accessKeyId:
# name:
Expand Down