Compare commits

...

9 Commits

Author SHA1 Message Date
Frederik Ring
5be3c36040 update alpine base image to 3.14 2021-06-28 20:25:21 +02:00
Frederik Ring
57afad5727 Merge pull request #4 from offen/stop-label
Allow for making container stop filter configurable
2021-06-26 21:19:35 +02:00
Frederik Ring
bafca7bb85 allow for making container stop filter configurable 2021-06-26 21:16:22 +02:00
Frederik Ring
84afc43fd8 Merge pull request #2 from offen/mc-extra-flags
Allow passing custom arguments to minio client
2021-05-25 07:37:00 +02:00
Frederik Ring
1af345061c use global options naming to be in line with minio docs 2021-05-25 07:35:21 +02:00
Frederik Ring
5368eb8c5e allow passing custom arguments to minio client 2021-05-24 20:34:30 +02:00
Frederik Ring
5978a897ad document leeway option when pruning 2021-04-08 17:24:44 +02:00
Frederik Ring
203bad3427 add missing sleep call 2021-04-08 08:27:21 +02:00
Frederik Ring
2892369677 sleep 10 minutes before pruning backups
if pruning happens immediately after taking the backup a race condition
can occur where a backup that would theoretically be eligible won't be
pruned as the current backup run was very fast, putting the potential
candidate just at the very edge of the selected time window
2021-04-08 08:17:55 +02:00
4 changed files with 42 additions and 7 deletions

View File

@@ -1,7 +1,7 @@
# Copyright 2021 - Offen Authors <hioffen@posteo.de> # Copyright 2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: MIT # SPDX-License-Identifier: MIT
FROM alpine:3.13 FROM alpine:3.14
WORKDIR /root WORKDIR /root

View File

@@ -38,11 +38,39 @@ AWS_S3_BUCKET_NAME="<xxx>"
# BACKUP_RETENTION_DAYS="7" # BACKUP_RETENTION_DAYS="7"
# In case the duration a backup takes fluctuates noticeably in your setup
# you can adjust this setting to make sure there are no race conditions
# between the backup finishing and the pruning not deleting backups that
# sit on the very edge of the time window. Set this value to a duration
# that is expected to be bigger than the maximum difference of backups.
# Valid values have a suffix of (s)econds, (m)inutes, (h)ours, or (d)ays.
# BACKUP_PRUNING_LEEWAY="10m"
########### BACKUP ENCRYPTION ########### BACKUP ENCRYPTION
# Backups can be encrypted using gpg in case a passphrase is given # Backups can be encrypted using gpg in case a passphrase is given
# GPG_PASSPHRASE="<xxx>" # GPG_PASSPHRASE="<xxx>"
########### STOPPING CONTAINERS DURING BACKUP
# Containers can be stopped by applying a
# `docker-volume-backup.stop-during-backup` label. By default, all containers
# that are labeled with `true` will be stopped. If you need more fine grained
# control (e.g. when running multiple containers based on this image), you can
# override this default by specifying a different value here.
# BACKUP_STOP_CONTAINER_LABEL="service1"
########### MINIO CLIENT CONFIGURATION
# Pass these additional flags to all MinIO client `mc` invocations.
# This can be used for example to pass `--insecure` when using self
# signed certificates, or passing `--debug` to gain insights on
# unexpected behavior.
# MC_GLOBAL_OPTIONS="<xxx>"
``` ```
## Example in a docker-compose setup ## Example in a docker-compose setup

View File

@@ -19,7 +19,7 @@ if [ -S "$DOCKER_SOCK" ]; then
TEMPFILE="$(mktemp)" TEMPFILE="$(mktemp)"
docker ps \ docker ps \
--format "{{.ID}}" \ --format "{{.ID}}" \
--filter "label=docker-volume-backup.stop-during-backup=true" \ --filter "label=docker-volume-backup.stop-during-backup=$BACKUP_STOP_CONTAINER_LABEL" \
> "$TEMPFILE" > "$TEMPFILE"
CONTAINERS_TO_STOP="$(cat $TEMPFILE | tr '\n' ' ')" CONTAINERS_TO_STOP="$(cat $TEMPFILE | tr '\n' ' ')"
CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)" CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)"
@@ -58,7 +58,7 @@ fi
if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
info "Uploading backup to remote storage" info "Uploading backup to remote storage"
echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"." echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
mc cp "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME" mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
echo "Upload finished." echo "Upload finished."
fi fi
@@ -72,16 +72,18 @@ echo "Will wait for next scheduled backup."
if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
info "Pruning old backups" info "Pruning old backups"
echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
sleep "$BACKUP_PRUNING_LEEWAY"
bucket=$AWS_S3_BUCKET_NAME bucket=$AWS_S3_BUCKET_NAME
rule_applies_to=$(mc rm --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l) rule_applies_to=$(mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l)
if [ "$rule_applies_to" == "0" ]; then if [ "$rule_applies_to" == "0" ]; then
echo "No backups found older than the configured retention period of $BACKUP_RETENTION_DAYS days." echo "No backups found older than the configured retention period of $BACKUP_RETENTION_DAYS days."
echo "Doing nothing." echo "Doing nothing."
exit 0 exit 0
fi fi
total=$(mc ls "backup-target/$bucket" | wc -l) total=$(mc ls $MC_GLOBAL_OPTIONS "backup-target/$bucket" | wc -l)
if [ "$rule_applies_to" == "$total" ]; then if [ "$rule_applies_to" == "$total" ]; then
echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue." echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
@@ -89,6 +91,6 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
exit 1 exit 1
fi fi
mc rm --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" mc rm $MC_GLOBAL_OPTIONS --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days." echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
fi fi

View File

@@ -15,16 +15,21 @@ BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
BACKUP_FILENAME=${BACKUP_FILENAME:-"backup-%Y-%m-%dT%H-%M-%S.tar.gz"} BACKUP_FILENAME=${BACKUP_FILENAME:-"backup-%Y-%m-%dT%H-%M-%S.tar.gz"}
BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}" BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}" AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}" AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
GPG_PASSPHRASE="${GPG_PASSPHRASE:-}" GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
MC_GLOBAL_OPTIONS="${MC_GLOBAL_OPTIONS:-}"
EOF EOF
chmod a+x env.sh chmod a+x env.sh
source env.sh source env.sh
mc alias set backup-target "https://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY" mc $MC_GLOBAL_OPTIONS alias set backup-target "https://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
# Add our cron entry, and direct stdout & stderr to Docker commands stdout # Add our cron entry, and direct stdout & stderr to Docker commands stdout
echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION." echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."