allow local storage of backups

Frederik Ring
2021-08-19 09:25:53 +02:00
parent 4b59089e3d
commit 767c21ef65
6 changed files with 74 additions and 19 deletions

View File

@@ -77,13 +77,24 @@ if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
   fi
 fi
 
+copy_backup () {
+  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "$1"
+}
+
 if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
   info "Uploading backup to remote storage"
   echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
-  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
+  copy_backup "backup-target/$AWS_S3_BUCKET_NAME"
   echo "Upload finished."
 fi
+
+if [ -d "$BACKUP_ARCHIVE" ]; then
+  info "Copying backup to local archive"
+  echo "Will copy to \"$BACKUP_ARCHIVE\"."
+  copy_backup "$BACKUP_ARCHIVE"
+  echo "Finished copying."
+fi
 
 if [ -f "$BACKUP_FILENAME" ]; then
   info "Cleaning up"
   rm -vf "$BACKUP_FILENAME"
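Note that the new local-archive branch only runs when $BACKUP_ARCHIVE exists as a directory inside the container, which in practice means mounting a volume at /archive. A minimal invocation sketch; the image tag and volume names here are hypothetical and not part of this commit:

# Hypothetical example: mounting a host directory at /archive makes the
# [ -d "$BACKUP_ARCHIVE" ] check pass, so copy_backup also writes a
# local copy of each backup. /backup is the BACKUP_SOURCES default.
docker run -d \
  -v app_data:/backup/app_data:ro \
  -v /srv/backups:/archive \
  docker-volume-backup  # hypothetical image tag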
@@ -92,16 +103,12 @@ fi
info "Backup finished"
echo "Will wait for next scheduled backup."
if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
info "Pruning old backups"
echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
sleep "$BACKUP_PRUNING_LEEWAY"
bucket=$AWS_S3_BUCKET_NAME
prune () {
target=$1
rule_applies_to=$(
mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force \
mc rm $MC_GLOBAL_OPTIONS --fake --recursive --force \
--older-than "${BACKUP_RETENTION_DAYS}d" \
"backup-target/$bucket" \
"$target" \
| wc -l
)
if [ "$rule_applies_to" == "0" ]; then
@@ -110,7 +117,7 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
     exit 0
   fi
-  total=$(mc ls $MC_GLOBAL_OPTIONS "backup-target/$bucket" | wc -l)
+  total=$(mc ls $MC_GLOBAL_OPTIONS "$target" | wc -l)
   if [ "$rule_applies_to" == "$total" ]; then
     echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
@@ -119,7 +126,21 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
   fi
   mc rm $MC_GLOBAL_OPTIONS \
-    --recursive -force \
-    --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
+    --recursive --force \
+    --older-than "${BACKUP_RETENTION_DAYS}d" "$target"
   echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
+}
+
+if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
+  info "Pruning old backups"
+  echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
+  sleep "$BACKUP_PRUNING_LEEWAY"
+  if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
+    info "Pruning old backups from remote storage"
+    prune "backup-target/$bucket"
+  fi
+  if [ -d "$BACKUP_ARCHIVE" ]; then
+    info "Pruning old backups from local archive"
+    prune "$BACKUP_ARCHIVE"
+  fi
+fi
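The pruning logic is now identical for both targets: mc rm --fake performs a dry run and prints one line per object that would be removed, so counting those lines lets the script bail out when nothing matches, or refuse to act when the rule would wipe every existing backup. A standalone sketch of that guard, using a hardcoded 7-day retention and a hypothetical bucket path:

# Dry run: --fake lists deletion candidates without removing anything.
candidates=$(
  mc rm --fake --recursive --force --older-than "7d" \
    "backup-target/my-bucket" \
    | wc -l
)
total=$(mc ls "backup-target/my-bucket" | wc -l)
if [ "$candidates" == "0" ]; then
  echo "Nothing to prune."
elif [ "$candidates" == "$total" ]; then
  echo "Refusing to prune: rule would delete every existing backup."
else
  # Only now delete for real.
  mc rm --recursive --force --older-than "7d" "backup-target/my-bucket"
fi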

View File

@@ -12,10 +12,12 @@ set -e
 cat <<EOF > env.sh
 BACKUP_SOURCES="${BACKUP_SOURCES:-/backup}"
 BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
-BACKUP_FILENAME=${BACKUP_FILENAME:-"backup-%Y-%m-%dT%H-%M-%S.tar.gz"}
+BACKUP_FILENAME="${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}"
+BACKUP_ARCHIVE="${BACKUP_ARCHIVE:-/archive}"
 BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
 BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
+BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
 AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
 AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
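The BACKUP_FILENAME change moves the quotes outside the parameter expansion: when the variable is overridden, the old form wrote the raw value into env.sh without quotes, which breaks sourcing for values containing spaces; the new form always emits a quoted assignment, consistent with the other variables. A quick illustration with a hypothetical override:

# The old form writes the override unquoted into env.sh; the new form
# always wraps the value in quotes.
BACKUP_FILENAME="my backup-%Y.tar.gz"
cat <<EOF
old: BACKUP_FILENAME=${BACKUP_FILENAME:-"backup-%Y-%m-%dT%H-%M-%S.tar.gz"}
new: BACKUP_FILENAME="${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}"
EOF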
@@ -23,14 +25,16 @@ AWS_ENDPOINT_PROTO="${AWS_ENDPOINT_PROTO:-https}"
 GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
-BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
 MC_GLOBAL_OPTIONS="${MC_GLOBAL_OPTIONS:-}"
 EOF
 chmod a+x env.sh
 source env.sh
 
-mc $MC_GLOBAL_OPTIONS alias set backup-target "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+if [ ! -z "$AWS_ACCESS_KEY_ID" ] && [ ! -z "$AWS_SECRET_ACCESS_KEY" ]; then
+  mc $MC_GLOBAL_OPTIONS alias set backup-target \
+    "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" \
+    "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+fi
 
 # Add our cron entry, and direct stdout & stderr to the Docker command's stdout
 echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."