Compare commits


29 Commits
v1.6.0 ... v1

Author SHA1 Message Date
Frederik Ring
4c84674650 Merge pull request #20 from offen/gpg-testcase
Add testcase for gpg encryption
2021-08-23 14:47:11 +02:00
Frederik Ring
6fe81cdf2d add testcase for gpg encryption 2021-08-23 14:42:50 +02:00
Frederik Ring
b7ba0e08df prefix mtime param with a +, use -name param for passing pattern to find 2021-08-20 21:51:45 +02:00
Frederik Ring
b558a57de9 Merge pull request #17 from offen/local-prune
Use find instead of mc for pruning local backups
2021-08-20 10:09:46 +02:00
Frederik Ring
278df9b2f7 use find instead of mc for pruning local backups 2021-08-20 10:01:46 +02:00
Frederik Ring
0782af88f4 fix blank variable when creating target 2021-08-19 16:35:21 +02:00
Frederik Ring
f82577fcb5 add prefix option to entrypoint script 2021-08-19 13:55:15 +02:00
Frederik Ring
7f261a54b6 Merge pull request #15 from offen/prune-prefix
Allow passing prefix to limit pruning ops
2021-08-19 13:45:10 +02:00
Frederik Ring
0069faa7fd allow passing prefix to limit pruning ops 2021-08-19 13:41:19 +02:00
Frederik Ring
8c7ffc3d99 Merge pull request #14 from offen/backup-archive
allow local storage of backups
2021-08-19 11:14:04 +02:00
Frederik Ring
f6b40742b4 use go 1.17 in builder 2021-08-19 11:08:36 +02:00
Frederik Ring
767c21ef65 allow local storage of backups 2021-08-19 10:35:39 +02:00
Frederik Ring
4b59089e3d add note about how the endpoint does not include a protocol 2021-08-19 08:57:48 +02:00
Frederik Ring
8e90ce408a add note about potentially having to raise memory limit when backing up large files 2021-08-18 20:38:51 +02:00
Frederik Ring
510ae889e4 document AWS_ENDPOINT_PROTO option 2021-08-17 19:49:51 +02:00
Frederik Ring
e4bb183afa add note about applying memory limit when running in swarm mode 2021-08-05 21:56:19 +02:00
Frederik Ring
5fd6f66324 label container in cli test 2021-07-11 20:21:17 +02:00
Frederik Ring
da75d232f4 add test for using docker cli 2021-07-11 14:02:05 +02:00
Frederik Ring
8a385d22aa sleep longer before trying to backup 2021-07-11 10:39:39 +02:00
Frederik Ring
a3d7af2b42 add docs on manually triggering backup 2021-07-11 10:36:29 +02:00
Frederik Ring
c01555f052 check for correct file in tests 2021-07-11 10:30:16 +02:00
Frederik Ring
d29d0d7399 check for correct file in tests 2021-07-11 10:26:35 +02:00
Frederik Ring
a91353742d add test for interacting with a swarm stack 2021-07-11 10:21:47 +02:00
Frederik Ring
dda71c3a5d fix distinct service names ending up in joint service token 2021-07-10 08:09:55 +02:00
Frederik Ring
a8f013e16a Merge pull request #8 from offen/some-tests
Add integration test
2021-07-09 10:00:51 +02:00
Frederik Ring
44d65c1a67 assert that all containers are back up and running after backup 2021-07-09 09:57:05 +02:00
Frederik Ring
88d4326e61 define cron schedule that never runs 2021-07-09 09:50:59 +02:00
Frederik Ring
6d3e43680c add basic integration test 2021-07-09 09:47:07 +02:00
Frederik Ring
6ce197696a scaffold basic testing in CI 2021-07-09 08:15:27 +02:00
13 changed files with 454 additions and 45 deletions

.circleci/config.yml

@@ -2,29 +2,24 @@ version: 2.1
 jobs:
   canary:
-    docker:
-      - image: cimg/base:2020.06
+    machine:
+      image: ubuntu-1604:202007-01
     working_directory: ~/docker-volume-backup
     steps:
       - checkout
-      - setup_remote_docker:
-          version: 20.10.6
       - run:
           name: Build
           command: |
             docker build . -t offen/docker-volume-backup:canary
       - run:
-          name: Create container from image
+          name: Install gnupg
           command: |
-            docker run -d offen/docker-volume-backup:canary
-            echo "Sleeping for 30s before checking if container is still running."
-            sleep 30
-            count=$(docker ps -q | wc -l)
-            if [[ $count != "1" ]]; then
-              echo "Expected one container to be running, found $count."
-              exit 1
-            fi
-            docker stop $(docker ps -q)
+            sudo apt-get install -y gnupg
+      - run:
+          name: Run tests
+          working_directory: ~/docker-volume-backup/test
+          command: |
+            ./test.sh canary
   build:
     docker:
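With the move to a machine executor, the canary job now just builds the image and hands off to the test harness. A rough local equivalent of what CI runs, with the tag and script names taken from this diff:

```sh
# Build the image under the same tag the canary job uses, then run the
# suite against it, mirroring the new "Run tests" step.
docker build . -t offen/docker-volume-backup:canary
cd test && ./test.sh canary
```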

.dockerignore Normal file (1 line added)

@@ -0,0 +1 @@
test

Dockerfile

@@ -1,7 +1,7 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0
-FROM golang:1.16-alpine as builder
+FROM golang:1.17-alpine as builder
 ARG MC_VERSION=RELEASE.2021-06-13T17-48-22Z
 RUN go install -ldflags "-X github.com/minio/mc/cmd.ReleaseTag=$MC_VERSION" github.com/minio/mc@$MC_VERSION
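The builder stage pins `mc` by injecting a release tag via ldflags. A quick way to check that the pinned version made it into a build, assuming `mc` ends up on the final image's PATH (the multi-stage copy is not shown in this hunk):

```sh
# Hypothetical smoke check: override the entrypoint and ask mc for its version.
docker build . -t offen/docker-volume-backup:canary
docker run --rm --entrypoint mc offen/docker-volume-backup:canary --version
```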

README.md

@@ -1,8 +1,8 @@
 # docker-volume-backup
-Backup Docker volumes to any S3 compatible storage.
+Backup Docker volumes locally or to any S3 compatible storage.
-The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to any S3 compatible storage and rotates away old backups if configured.
+The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to a local directory or any S3 compatible storage (or both) and rotates away old backups if configured.
 ## Configuration
@@ -15,7 +15,7 @@ Backup targets, schedule and retention are configured in environment variables:
 # template expression.
 BACKUP_CRON_EXPRESSION="0 2 * * *"
-BACKUP_FILENAME="offen-db-%Y-%m-%dT%H-%M-%S.tar.gz"
+BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.tar.gz"

 ########### BACKUP STORAGE
@@ -28,11 +28,36 @@ AWS_SECRET_ACCESS_KEY="<xxx>"
 AWS_S3_BUCKET_NAME="<xxx>"

 # This is the FQDN of your storage server, e.g. `storage.example.com`.
-# Do not set this when working against AWS S3.
+# Do not set this when working against AWS S3. If you need to set a
+# specific protocol, you will need to use the option below.
 # AWS_ENDPOINT="<xxx>"

+# The protocol to be used when communicating with your storage server.
+# Defaults to "https". You can set this to "http" when communicating with
+# a different Docker container on the same host for example.
+# AWS_ENDPOINT_PROTO="https"

+# In addition to backing up remotely, you can also store backups locally.
+# Pass in a local path to store your backups here if needed. You likely want
+# to mount a local folder or Docker volume into that location when running
+# the container. Local paths can also be subject to pruning of old
+# backups as defined below.
+# BACKUP_ARCHIVE="/archive"

 ########### BACKUP PRUNING

 # **IMPORTANT, PLEASE READ THIS BEFORE USING THIS FEATURE**:
 # The mechanism used for pruning backups is not very sophisticated
 # and applies its rules to **all files in the target directory** by default,
 # which means that if you are storing your backups next to other files,
 # these might become subject to deletion too. When using this option,
 # make sure the backup files are stored in a directory used exclusively
 # for storing them, or configure BACKUP_PRUNING_PREFIX to limit
 # removal to certain files.

 # Define this value to enable automatic pruning of old backups. The value
 # declares the number of days for which a backup is kept.
@@ -47,6 +72,15 @@ AWS_S3_BUCKET_NAME="<xxx>"
 # BACKUP_PRUNING_LEEWAY="10m"

+# In case your target bucket or directory contains files other than the ones
+# managed by this container, you can limit the scope of rotation by setting
+# a prefix value. This would usually be the non-parametrized part of your
+# BACKUP_FILENAME. E.g. if BACKUP_FILENAME is `db-backup-%Y-%m-%dT%H-%M-%S.tar.gz`,
+# you can set BACKUP_PRUNING_PREFIX to `db-backup-` and make sure
+# unrelated files are not affected.
+# BACKUP_PRUNING_PREFIX="backup-"

 ########### BACKUP ENCRYPTION

 # Backups can be encrypted using gpg in case a passphrase is given
@@ -102,6 +136,10 @@ services:
       # to stop the container
       - /var/run/docker.sock:/var/run/docker.sock:ro
       - data:/backup/my-app-backup:ro
+      # If you mount a local directory or volume to `/archive`, a local
+      # copy of the backup will be stored there. You can override the
+      # location inside of the container by setting `BACKUP_ARCHIVE`.
+      # - /path/to/local_backups:/archive
 volumes:
   data:
 ```
@@ -112,6 +150,28 @@ By default, Docker Swarm will restart stopped containers automatically, even when
 ---

+When running in Swarm mode, it's also advised to set a hard memory limit on your service (~25MB should be enough in most cases, but if you back up large files of half a gigabyte or more, you might have to raise this in case the backup exits with `Killed`):
+
+```yml
+services:
+  backup:
+    image: offen/docker-volume-backup:latest
+    deploy:
+      resources:
+        limits:
+          memory: 25M
+```
+
+## Manually triggering a backup
+
+You can manually trigger a backup run outside of the defined cron schedule by executing the `backup` command inside the container:
+
+```
+docker exec <container_ref> backup
+```
+
+---
+
 ## Differences to `futurice/docker-volume-backup`

 This image is heavily inspired by `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
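For reference, the `BACKUP_FILENAME` default documented above is a strftime-style template; presumably the script expands it with `date` at backup time. A sketch of that expansion (not a quote from the script):

```sh
# Expanding the documented default template with date(1); BusyBox date in
# the alpine-based image accepts the same format specifiers.
BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.tar.gz"
date +"$BACKUP_FILENAME"   # e.g. backup-2021-08-23T14-47-11.tar.gz
```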

src/backup.sh

@@ -17,13 +17,12 @@ DOCKER_SOCK="/var/run/docker.sock"
 if [ -S "$DOCKER_SOCK" ]; then
   TEMPFILE="$(mktemp)"
-  docker ps \
-    --format "{{.ID}}" \
+  docker ps -q \
     --filter "label=docker-volume-backup.stop-during-backup=$BACKUP_STOP_CONTAINER_LABEL" \
     > "$TEMPFILE"
   CONTAINERS_TO_STOP="$(cat $TEMPFILE | tr '\n' ' ')"
   CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)"
-  CONTAINERS_TOTAL="$(docker ps --format "{{.ID}}" | wc -l)"
+  CONTAINERS_TOTAL="$(docker ps -q | wc -l)"
   rm "$TEMPFILE"
   echo "$CONTAINERS_TOTAL containers running on host in total."
   echo "$CONTAINERS_TO_STOP_TOTAL containers marked to be stopped during backup."
@@ -72,19 +71,30 @@ if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
   done
   if [ -n "$SERVICES_REQUIRING_UPDATE" ]; then
-    for SERVICE_NAME in "$(echo -n "$SERVICES_REQUIRING_UPDATE" | tr ' ' '\n' | sort -u)"; do
+    for SERVICE_NAME in $(echo -n "$SERVICES_REQUIRING_UPDATE" | tr ' ' '\n' | sort -u); do
       docker service update --force $SERVICE_NAME
     done
   fi
 fi

+copy_backup () {
+  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "$1"
+}
+
 if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
   info "Uploading backup to remote storage"
   echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
-  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
+  copy_backup "backup-target/$AWS_S3_BUCKET_NAME"
   echo "Upload finished."
 fi

+if [ -d "$BACKUP_ARCHIVE" ]; then
+  info "Copying backup to local archive"
+  echo "Will copy to \"$BACKUP_ARCHIVE\"."
+  copy_backup "$BACKUP_ARCHIVE"
+  echo "Finished copying."
+fi
+
 if [ -f "$BACKUP_FILENAME" ]; then
   info "Cleaning up"
   rm -vf "$BACKUP_FILENAME"
@@ -93,27 +103,79 @@ fi
 info "Backup finished"
 echo "Will wait for next scheduled backup."

+probe_expired () {
+  local target=$1
+  local is_local=$2
+  if [ -z "$is_local" ]; then
+    if [ ! -z "$BACKUP_PRUNING_PREFIX" ]; then
+      target="${target}/${BACKUP_PRUNING_PREFIX}"
+    fi
+    mc rm $MC_GLOBAL_OPTIONS --fake --recursive --force \
+      --older-than "${BACKUP_RETENTION_DAYS}d" \
+      "$target"
+  else
+    find $target -name "${BACKUP_PRUNING_PREFIX:-*}" -type f -mtime "+${BACKUP_RETENTION_DAYS}"
+  fi
+}
+
+probe_all () {
+  local target=$1
+  local is_local=$2
+  if [ -z "$is_local" ]; then
+    if [ ! -z "$BACKUP_PRUNING_PREFIX" ]; then
+      target="${target}/${BACKUP_PRUNING_PREFIX}"
+    fi
+    mc ls $MC_GLOBAL_OPTIONS "$target"
+  else
+    find $target -name "${BACKUP_PRUNING_PREFIX:-*}" -type f
+  fi
+}
+
+delete () {
+  local target=$1
+  local is_local=$2
+  if [ -z "$is_local" ]; then
+    if [ ! -z "$BACKUP_PRUNING_PREFIX" ]; then
+      target="${target}/${BACKUP_PRUNING_PREFIX}"
+    fi
+    mc rm $MC_GLOBAL_OPTIONS --recursive --force \
+      --older-than "${BACKUP_RETENTION_DAYS}d" \
+      "$target"
+  else
+    find $target -name "${BACKUP_PRUNING_PREFIX:-*}" -type f -mtime "+${BACKUP_RETENTION_DAYS}" -delete
+  fi
+}
+
+prune () {
+  local target=$1
+  local is_local=$2
+  rule_applies_to=$(probe_expired "$target" "$is_local" | wc -l)
+  if [ "$rule_applies_to" == "0" ]; then
+    echo "No backups found older than the configured retention period of ${BACKUP_RETENTION_DAYS} days."
+    echo "Doing nothing."
+  else
+    total=$(probe_all "$target" "$is_local" | wc -l)
+    if [ "$rule_applies_to" == "$total" ]; then
+      echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
+      echo "If this is what you want, please remove files manually instead of using this script."
+    else
+      delete "$target" "$is_local"
+      echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
+    fi
+  fi
+}
+
 if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
   info "Pruning old backups"
   echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
   sleep "$BACKUP_PRUNING_LEEWAY"
-  bucket=$AWS_S3_BUCKET_NAME
-  rule_applies_to=$(mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l)
-  if [ "$rule_applies_to" == "0" ]; then
-    echo "No backups found older than the configured retention period of $BACKUP_RETENTION_DAYS days."
-    echo "Doing nothing."
-    exit 0
-  fi
-  total=$(mc ls $MC_GLOBAL_OPTIONS "backup-target/$bucket" | wc -l)
-  if [ "$rule_applies_to" == "$total" ]; then
-    echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
-    echo "If this is what you want, please remove files manually instead of using this script."
-    exit 1
-  fi
-  mc rm $MC_GLOBAL_OPTIONS --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
-  echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
+  if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
+    info "Pruning old backups from remote storage"
+    prune "backup-target/$AWS_S3_BUCKET_NAME"
+  fi
+  if [ -d "$BACKUP_ARCHIVE" ]; then
+    info "Pruning old backups from local archive"
+    prune "$BACKUP_ARCHIVE" "local"
+  fi
 fi
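One subtlety in the new local pruning path, and the reason commit b7ba0e08df prefixes the `-mtime` argument with a `+`: find measures age in whole 24-hour periods, and `+N` matches files strictly older than N periods. A sketch with a 7-day retention (the `/archive` path and `backup-` prefix are illustrative):

```sh
# -mtime 7 would match only files exactly 7 periods old; -mtime +7 matches
# everything strictly older, which is what a 7-day retention calls for.
find /archive -name 'backup-*' -type f -mtime +7          # list prune candidates
find /archive -name 'backup-*' -type f -mtime +7 -delete  # actually prune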

src/entrypoint.sh

@@ -12,24 +12,30 @@ set -e
 cat <<EOF > env.sh
 BACKUP_SOURCES="${BACKUP_SOURCES:-/backup}"
 BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
-BACKUP_FILENAME=${BACKUP_FILENAME:-"backup-%Y-%m-%dT%H-%M-%S.tar.gz"}
+BACKUP_FILENAME="${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}"
+BACKUP_ARCHIVE="${BACKUP_ARCHIVE:-/archive}"
 BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
 BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
+BACKUP_PRUNING_PREFIX="${BACKUP_PRUNING_PREFIX:-}"
+BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
 AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
 AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
+AWS_ENDPOINT_PROTO="${AWS_ENDPOINT_PROTO:-https}"
 GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
-BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
 MC_GLOBAL_OPTIONS="${MC_GLOBAL_OPTIONS:-}"
 EOF
 chmod a+x env.sh
 source env.sh

-mc $MC_GLOBAL_OPTIONS alias set backup-target "https://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+if [ ! -z "$AWS_ACCESS_KEY_ID" ] && [ ! -z "$AWS_SECRET_ACCESS_KEY" ]; then
+  mc $MC_GLOBAL_OPTIONS alias set backup-target \
+    "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" \
+    "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+fi

 # Add our cron entry, and direct stdout & stderr to Docker commands stdout
 echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."

test/cli/run.sh Executable file (64 lines added)

@@ -0,0 +1,64 @@
#!/bin/sh

set -e

cd $(dirname $0)

docker network create test_network
docker volume create backup_data
docker volume create app_data

docker run -d \
  --name minio \
  --network test_network \
  --env MINIO_ROOT_USER=test \
  --env MINIO_ROOT_PASSWORD=test \
  --env MINIO_ACCESS_KEY=test \
  --env MINIO_SECRET_KEY=GMusLtUmILge2by+z890kQ \
  -v backup_data:/data \
  minio/minio:RELEASE.2020-08-04T23-10-51Z server /data

docker exec minio mkdir -p /data/backup

docker run -d \
  --name offen \
  --network test_network \
  --label "docker-volume-backup.stop-during-backup=true" \
  -v app_data:/var/opt/offen/ \
  offen/offen:latest

sleep 10

docker run -d \
  --name backup \
  --network test_network \
  -v app_data:/backup/app_data \
  -v /var/run/docker.sock:/var/run/docker.sock \
  --env AWS_ACCESS_KEY_ID=test \
  --env AWS_SECRET_ACCESS_KEY=GMusLtUmILge2by+z890kQ \
  --env AWS_ENDPOINT=minio:9000 \
  --env AWS_ENDPOINT_PROTO=http \
  --env AWS_S3_BUCKET_NAME=backup \
  --env BACKUP_FILENAME=test.tar.gz \
  --env BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?" \
  offen/docker-volume-backup:$TEST_VERSION

docker exec backup backup

docker run --rm -it \
  -v backup_data:/data alpine \
  ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
echo "[TEST:PASS] Found relevant files in untarred backup."

if [ "$(docker ps -q | wc -l)" != "3" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker ps
  exit 1
fi
echo "[TEST:PASS] All containers running post backup."

docker rm $(docker stop minio offen backup)
docker volume rm backup_data app_data
docker network rm test_network
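The script expects `TEST_VERSION` in its environment (normally injected by `test/test.sh`); a sketch of invoking it standalone against a locally built image:

```sh
# Build and tag the image, then run just the CLI test against it.
docker build . -t offen/docker-volume-backup:canary
TEST_VERSION=canary ./test/cli/run.sh
```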

test/compose/.gitignore vendored Normal file (1 line added)

@@ -0,0 +1 @@
local

test/compose/docker-compose.yml

@@ -0,0 +1,47 @@
version: '3'

services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
    environment:
      MINIO_ROOT_USER: test
      MINIO_ROOT_PASSWORD: test
      MINIO_ACCESS_KEY: test
      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
    volumes:
      - backup_data:/data

  backup: &default_backup_service
    image: offen/docker-volume-backup:${TEST_VERSION}
    depends_on:
      - minio
    restart: always
    environment:
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
      AWS_ENDPOINT: minio:9000
      AWS_ENDPOINT_PROTO: http
      AWS_S3_BUCKET_NAME: backup
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      GPG_PASSPHRASE: 1234secret
    volumes:
      - ./local:/archive
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  backup_data:
  app_data:
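The file parametrizes the image tag and retention through the environment, which is how the second half of the `run.sh` below re-runs the stack with `BACKUP_RETENTION_DAYS=0`. Bringing the fixture up by hand looks roughly like:

```sh
cd test/compose
TEST_VERSION=canary BACKUP_RETENTION_DAYS=7 docker-compose up -d
docker-compose exec backup backup   # trigger a backup run immediately
```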

test/compose/run.sh Executable file (55 lines added)

@@ -0,0 +1,55 @@
#!/bin/sh

set -e

cd $(dirname $0)
mkdir -p local

docker-compose up -d
sleep 5

docker-compose exec backup backup

docker run --rm -it \
  -v compose_backup_data:/data alpine \
  ash -c 'apk add gnupg && echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /data/backup/test.tar.gz.gpg > /tmp/test.tar.gz && tar -xf /tmp/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
echo "[TEST:PASS] Found relevant files in untarred remote backup."

echo 1234secret | gpg -d --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
rm ./local/decrypted.tar.gz
echo "[TEST:PASS] Found relevant files in untarred local backup."

if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker-compose ps
  exit 1
fi
echo "[TEST:PASS] All containers running post backup."

# The second part of this test checks that backups do not get deleted when the
# retention is set to 0 days (deleting would mean all backups get removed).
# TODO: find out if we can test actual deletion without having to wait for a day.
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5

docker-compose exec backup backup

docker run --rm -it \
  -v compose_backup_data:/data alpine \
  ash -c '[ $(find /data/backup/ -type f | wc -l) = "1" ]'
echo "[TEST:PASS] Remote backups have not been deleted."

if [ "$(find ./local -type f | wc -l)" != "1" ]; then
  echo "[TEST:FAIL] Backups should not have been deleted, instead seen:"
  find ./local -type f
fi
echo "[TEST:PASS] Local backups have not been deleted."

docker-compose down --volumes
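The decryption calls rely on plain symmetric gpg; a minimal round-trip sketch of what the container-side encryption and the test-side decryption amount to (file names here are illustrative):

```sh
echo "payload" > sample.txt
# Symmetric encryption with a passphrase, producing sample.txt.gpg:
gpg --batch --yes --passphrase 1234secret -c sample.txt
# Decryption, reading the passphrase from stdin like the test does:
echo 1234secret | gpg -d --batch --passphrase-fd 0 --yes sample.txt.gpg
# Newer gpg2 builds may also need --pinentry-mode loopback, as the
# in-container invocation above uses.
```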

test/swarm/docker-compose.yml

@@ -0,0 +1,65 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

version: '3.8'

services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      MINIO_ROOT_USER: test
      MINIO_ROOT_PASSWORD: test
      MINIO_ACCESS_KEY: test
      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
    volumes:
      - backup_data:/data

  backup: &default_backup_service
    image: offen/docker-volume-backup:${TEST_VERSION}
    depends_on:
      - minio
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
      AWS_ENDPOINT: minio:9000
      AWS_ENDPOINT_PROTO: http
      AWS_S3_BUCKET_NAME: backup
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: 7
      BACKUP_PRUNING_LEEWAY: 5s
    volumes:
      - pg_data:/backup/pg_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    deploy:
      replicas: 2
      restart_policy:
        condition: on-failure

  pg:
    image: postgres:12.2-alpine
    environment:
      POSTGRES_PASSWORD: example
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - pg_data:/var/lib/postgresql/data
    deploy:
      restart_policy:
        condition: on-failure

volumes:
  backup_data:
  pg_data:
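The replica counts here explain the magic number in the swarm `run.sh` that follows: 1 minio + 1 backup + 2 offen replicas + 1 pg gives 5 expected containers once the stack has settled.

```sh
# Expected to print 5 after a successful backup run against this stack.
docker ps -q | wc -l
```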

test/swarm/run.sh Executable file (36 lines added)

@@ -0,0 +1,36 @@
#!/bin/sh

set -e

cd $(dirname $0)

docker swarm init
docker stack deploy --compose-file=docker-compose.yml test_stack

while [ -z $(docker ps -q -f name=backup) ]; do
  echo "[TEST:INFO] Backup container not ready yet. Retrying."
  sleep 1
done

sleep 20

docker exec $(docker ps -q -f name=backup) backup

docker run --rm -it \
  -v test_stack_backup_data:/data alpine \
  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
echo "[TEST:PASS] Found relevant files in untarred backup."

if [ "$(docker ps -q | wc -l)" != "5" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker ps -a
  exit 1
fi
echo "[TEST:PASS] All containers running post backup."

docker stack rm test_stack
docker swarm leave --force
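Note that `docker ps -f name=backup` does substring matching, so the filter resolves the swarm task name (something like `test_stack_backup.1.<task-id>`) without knowing it in advance. To inspect what it matches:

```sh
docker ps -f name=backup --format '{{.ID}} {{.Names}}'
```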

test/test.sh Executable file (17 lines added)

@@ -0,0 +1,17 @@
#!/bin/sh

set -e

TEST_VERSION=${1:-canary}

for dir in $(ls -d -- */); do
  test="${dir}run.sh"
  echo "################################################"
  echo "Now running $test"
  echo "################################################"
  echo ""
  TEST_VERSION=$TEST_VERSION /bin/sh $test
  echo ""
  echo "$test passed"
  echo ""
done
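The harness runs every `*/run.sh` it finds, defaulting the image tag to `canary`; running the whole suite against another tag is just:

```sh
# Assumes an image with that tag exists locally or can be pulled.
cd test && ./test.sh v1.6.0
```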