Compare commits

...

5 Commits

Author         SHA1        Message                                  Date
Frederik Ring  203bad3427  add missing sleep call                   2021-04-08 08:27:21 +02:00
Frederik Ring  2892369677  sleep 10 minutes before pruning backups  2021-04-08 08:17:55 +02:00
                           if pruning happens immediately after taking the backup a race condition
                           can occur where a backup that would theoretically be eligible won't be
                           pruned as the current backup run was very fast, putting the potential
                           candidate just at the very edge of the selected time window
Frederik Ring  204a0862c6  make log output more consistent          2021-04-03 09:33:11 +02:00
Frederik Ring  17a3523ded  tweak comparison in README               2021-04-02 14:44:28 +02:00
Frederik Ring  c5ab795f9c  add tag condition for ci                 2021-04-02 14:24:48 +02:00
4 changed files with 27 additions and 18 deletions
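
Commits 2892369677 and 203bad3427 above add (and then actually wire up) a leeway sleep before pruning. The race the commit message describes is easiest to see on a timeline; the schedule and retention values below are illustrative assumptions, not taken from this changeset:

# Illustrative timeline (assumed: @daily schedule, BACKUP_RETENTION_DAYS=7):
#
#   day 0, 08:00:05  backup-A finishes uploading
#   day 7, 08:00:02  the next run reaches the pruning step almost immediately,
#                    so backup-A is still three seconds short of seven days old
#                    and `--older-than 7d` skips it until the following run
#
# Sleeping before the eligibility check pushes it safely past that edge:
sleep "$BACKUP_PRUNING_LEEWAY"   # defaults to 10m, see the last file in this diff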

View File

@@ -33,7 +33,10 @@ workflows:
   version: 2
   deploy:
     jobs:
-      - build
+      - build:
+          filters:
+            tags:
+              only: /^v.*/
 
 orbs:
   docker: circleci/docker@1.0.1
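
The added `filters` block matters because CircleCI workflows ignore tag pushes unless a job explicitly opts in; with it, the `build` job also runs for any git tag matching `/^v.*/`. For instance, pushing a hypothetical release tag would now trigger the pipeline:

# Hypothetical example: any tag name starting with "v" matches /^v.*/
git tag v1.0.0
git push origin v1.0.0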

View File

@@ -28,7 +28,7 @@ AWS_SECRET_ACCESS_KEY="<xxx>"
 AWS_S3_BUCKET_NAME="<xxx>"
 
 # This is the FQDN of your storage server, e.g. `storage.example.com`.
-# You can leave it blank when working against AWS S3.
+# Do not set this when working against AWS S3.
 # AWS_ENDPOINT="<xxx>"
 
 ########### BACKUP PRUNING
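
The reworded comment matches how the variable is actually consumed: the last file in this diff defaults `AWS_ENDPOINT` to `s3.amazonaws.com`, so it only needs a value for non-AWS backends. A minimal sketch of both setups, using placeholder values:

# Against AWS S3: leave AWS_ENDPOINT unset, the s3.amazonaws.com default applies
AWS_ACCESS_KEY_ID="<xxx>"
AWS_SECRET_ACCESS_KEY="<xxx>"
AWS_S3_BUCKET_NAME="my-backup-bucket"   # placeholder bucket name

# Against MinIO or another S3-compatible server: also set the endpoint's FQDN
AWS_ENDPOINT="storage.example.com"      # example host from the README comment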
@@ -80,7 +80,9 @@ volumes:
 ## Differences to `futurice/docker-volume-backup`
 
-This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image because of the following requirements:
+This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
 
-- The original image is based on `ubuntu`, making it very heavy. This version is roughly 500MB smaller in size.
+- The original image is based on `ubuntu`, making it very heavy. This version is roughly 1/3 in compressed size.
 - This image makes use of the MinIO client `mc` instead of the full blown AWS CLI for uploading backups.
 - The original image proposed to handle backup rotation through AWS S3 lifecycle policies. This image adds the option to rotate old backups through the same script so this functionality can also be offered for non-AWS storage backends like MinIO.
 - InfluxDB specific functionality was removed.

View File

@@ -12,7 +12,7 @@ function info {
   echo -e "\n[INFO] $1\n"
 }
 
-info "Backup starting"
+info "Preparing backup"
 
 DOCKER_SOCK="/var/run/docker.sock"
 if [ -S "$DOCKER_SOCK" ]; then
@@ -25,12 +25,12 @@ if [ -S "$DOCKER_SOCK" ]; then
   CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)"
   CONTAINERS_TOTAL="$(docker ps --format "{{.ID}}" | wc -l)"
   rm "$TEMPFILE"
-  echo "$CONTAINERS_TOTAL containers running on host in total"
-  echo "$CONTAINERS_TO_STOP_TOTAL containers marked to be stopped during backup"
+  echo "$CONTAINERS_TOTAL containers running on host in total."
+  echo "$CONTAINERS_TO_STOP_TOTAL containers marked to be stopped during backup."
 else
   CONTAINERS_TO_STOP_TOTAL="0"
   CONTAINERS_TOTAL="0"
-  echo "Cannot access \"$DOCKER_SOCK\", won't look for containers to stop"
+  echo "Cannot access \"$DOCKER_SOCK\", won't look for containers to stop."
 fi
 
 if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
@@ -57,9 +57,9 @@ fi
 if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
   info "Uploading backup to remote storage"
-  echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\""
+  echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
   mc cp "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
-  echo "Upload finished"
+  echo "Upload finished."
 fi
 
 if [ -f "$BACKUP_FILENAME" ]; then
@@ -68,26 +68,29 @@ if [ -f "$BACKUP_FILENAME" ]; then
 fi
 
 info "Backup finished"
-echo "Will wait for next scheduled backup"
+echo "Will wait for next scheduled backup."
 
 if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
   info "Pruning old backups"
+  echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
+  sleep "$BACKUP_PRUNING_LEEWAY"
   bucket=$AWS_S3_BUCKET_NAME
   rule_applies_to=$(mc rm --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l)
   if [ "$rule_applies_to" == "0" ]; then
-    echo "No backups found that match the configured retention period. Doing nothing."
+    echo "No backups found older than the configured retention period of $BACKUP_RETENTION_DAYS days."
+    echo "Doing nothing."
     exit 0
   fi
-  available=$(mc ls "backup-target/$bucket" | wc -l)
-  if [ "$rule_applies_to" == "$available" ]; then
-    echo "Using a retention of $BACKUP_RETENTION_DAYS days would prune all currently existing backups, will not continue."
+  total=$(mc ls "backup-target/$bucket" | wc -l)
+  if [ "$rule_applies_to" == "$total" ]; then
+    echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
     echo "If this is what you want, please remove files manually instead of using this script."
     exit 1
   fi
   mc rm --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
-  echo "Successfully pruned all backups older than ${BACKUP_RETENTION_DAYS} days"
+  echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
 fi
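
Taken together, the new pruning flow is a dry-run-first pattern: `mc rm --fake` prints what a delete would remove without removing anything, the line count serves as the number of eligible backups, and the real delete only runs if at least one backup would survive. Condensed into a sketch (not the verbatim script):

# Sketch of the pruning flow above, using only calls that appear in the diff
eligible=$(mc rm --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l)
total=$(mc ls "backup-target/$bucket" | wc -l)
if [ "$eligible" != "0" ] && [ "$eligible" != "$total" ]; then
  # safe to proceed: the retention window leaves at least one backup in place
  mc rm --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
fi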

View File

@@ -15,6 +15,7 @@ BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
 BACKUP_FILENAME=${BACKUP_FILENAME:-"backup-%Y-%m-%dT%H-%M-%S.tar.gz"}
 BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
+BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
 AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
 AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
@@ -27,9 +28,9 @@ source env.sh
 mc alias set backup-target "https://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
 
 # Add our cron entry, and direct stdout & stderr to Docker commands stdout
-echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION"
+echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
 echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
 
 # Let cron take the wheel
-echo "Starting cron in foreground"
+echo "Starting cron in foreground."
 crond -f -l 8
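
Since `BACKUP_PRUNING_LEEWAY` now carries a `:-10m` default, the leeway sleep applies even when the variable is never set, and operators only need to export it to change the window. A hypothetical invocation (image name and values are placeholders, not from this changeset):

# Hypothetical run; image name and values are placeholders
docker run -d \
  --env AWS_S3_BUCKET_NAME="my-backup-bucket" \
  --env BACKUP_RETENTION_DAYS="7" \
  --env BACKUP_PRUNING_LEEWAY="30m" \
  --volume /var/run/docker.sock:/var/run/docker.sock:ro \
  my-registry/docker-volume-backup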