Compare commits


38 Commits

Author SHA1 Message Date
Frederik Ring
f82577fcb5 add prefix option to entrypoint script 2021-08-19 13:55:15 +02:00
Frederik Ring
7f261a54b6 Merge pull request #15 from offen/prune-prefix
Allow passing prefix to limit pruning ops
2021-08-19 13:45:10 +02:00
Frederik Ring
0069faa7fd allow passing prefix to limit pruning ops 2021-08-19 13:41:19 +02:00
Frederik Ring
8c7ffc3d99 Merge pull request #14 from offen/backup-archive
allow local storage of backups
2021-08-19 11:14:04 +02:00
Frederik Ring
f6b40742b4 use go 1.17 in builder 2021-08-19 11:08:36 +02:00
Frederik Ring
767c21ef65 allow local storage of backups 2021-08-19 10:35:39 +02:00
Frederik Ring
4b59089e3d add note about how the endpoint does not include a protocol 2021-08-19 08:57:48 +02:00
Frederik Ring
8e90ce408a add note about potentially having to raise memory limit when backing up large files 2021-08-18 20:38:51 +02:00
Frederik Ring
510ae889e4 document AWS_ENDPOINT_PROTO option 2021-08-17 19:49:51 +02:00
Frederik Ring
e4bb183afa add note about applying memory limit when running in swarm mode 2021-08-05 21:56:19 +02:00
Frederik Ring
5fd6f66324 label container in cli test 2021-07-11 20:21:17 +02:00
Frederik Ring
da75d232f4 add test for using docker cli 2021-07-11 14:02:05 +02:00
Frederik Ring
8a385d22aa sleep longer before trying to backup 2021-07-11 10:39:39 +02:00
Frederik Ring
a3d7af2b42 add docs on manually triggering backup 2021-07-11 10:36:29 +02:00
Frederik Ring
c01555f052 check for correct file in tests 2021-07-11 10:30:16 +02:00
Frederik Ring
d29d0d7399 check for correct file in tests 2021-07-11 10:26:35 +02:00
Frederik Ring
a91353742d add test for interacting with a swarm stack 2021-07-11 10:21:47 +02:00
Frederik Ring
dda71c3a5d fix distinct service names ending up in joint service token 2021-07-10 08:09:55 +02:00
Frederik Ring
a8f013e16a Merge pull request #8 from offen/some-tests
Add integration test
2021-07-09 10:00:51 +02:00
Frederik Ring
44d65c1a67 assert that all containers are back up and running after backup 2021-07-09 09:57:05 +02:00
Frederik Ring
88d4326e61 define cron schedule that never runs 2021-07-09 09:50:59 +02:00
Frederik Ring
6d3e43680c add basic integration test 2021-07-09 09:47:07 +02:00
Frederik Ring
6ce197696a scaffold basic testing in CI 2021-07-09 08:15:27 +02:00
Frederik Ring
ac3a231d2b Merge pull request #7 from offen/swarm-support
Support Docker in Swarm mode
2021-07-08 20:07:06 +02:00
Frederik Ring
054ab8fbe6 when stopped container was part of a stack service, update service instead 2021-07-08 19:54:04 +02:00
Frederik Ring
fa356137e8 inject proper version for mc command at compile time 2021-07-03 10:25:26 +02:00
Frederik Ring
07befda44d build mc from source, support arm/v7 2021-07-01 15:16:39 +02:00
Frederik Ring
c33ebc0c70 Merge pull request #5 from offen/multiarch
Build image for arm architectures
2021-07-01 14:22:54 +02:00
Frederik Ring
23c287bfc7 build image for arm architectures 2021-07-01 14:19:55 +02:00
Frederik Ring
5be3c36040 update alpine base image to 3.14 2021-06-28 20:25:21 +02:00
Frederik Ring
57afad5727 Merge pull request #4 from offen/stop-label
Allow for making container stop filter configurable
2021-06-26 21:19:35 +02:00
Frederik Ring
bafca7bb85 allow for making container stop filter configurable 2021-06-26 21:16:22 +02:00
Frederik Ring
84afc43fd8 Merge pull request #2 from offen/mc-extra-flags
Allow passing custom arguments to minio client
2021-05-25 07:37:00 +02:00
Frederik Ring
1af345061c use global options naming to be in line with minio docs 2021-05-25 07:35:21 +02:00
Frederik Ring
5368eb8c5e allow passing custom arguments to minio client 2021-05-24 20:34:30 +02:00
Frederik Ring
5978a897ad document leeway option when pruning 2021-04-08 17:24:44 +02:00
Frederik Ring
203bad3427 add missing sleep call 2021-04-08 08:27:21 +02:00
Frederik Ring
2892369677 sleep 10 minutes before pruning backups
if pruning happens immediately after taking the backup a race condition
can occur where a backup that would theoretically be eligible won't be
pruned as the current backup run was very fast, putting the potential
candidate just at the very edge of the selected time window
2021-04-08 08:17:55 +02:00
14 changed files with 486 additions and 46 deletions

.circleci/config.yml

@@ -1,40 +1,61 @@
version: 2.1
jobs:
build:
docker:
- image: cimg/base:2020.06
canary:
machine:
image: ubuntu-1604:202007-01
working_directory: ~/docker-volume-backup
steps:
- checkout
- setup_remote_docker
- run:
name: Build
command: make build
- run:
name: Check if image needs to be pushed
command: |
if [[ -z "$CIRCLE_TAG" ]]; then
echo "Not a git tag, nothing to do ..."
circleci-agent step halt
fi
docker build . -t offen/docker-volume-backup:canary
- run:
name: Run tests
working_directory: ~/docker-volume-backup/test
command: |
./test.sh canary
build:
docker:
- image: cimg/base:2020.06
environment:
DOCKER_BUILDKIT: '1'
DOCKER_CLI_EXPERIMENTAL: enabled
working_directory: ~/docker-volume-backup
steps:
- checkout
- setup_remote_docker:
version: 20.10.6
- docker/install-docker-credential-helper
- docker/configure-docker-credentials-store
- run:
name: Push to Docker Hub
command: |
echo "$DOCKER_ACCESSTOKEN" | docker login --username offen --password-stdin
docker tag offen/docker-volume-backup:local offen/docker-volume-backup:$CIRCLE_TAG
docker tag offen/docker-volume-backup:local offen/docker-volume-backup:latest
docker push offen/docker-volume-backup:$CIRCLE_TAG
docker push offen/docker-volume-backup:latest
# This is required for building ARM: https://gitlab.alpinelinux.org/alpine/aports/-/issues/12406
docker run --rm --privileged linuxkit/binfmt:v0.8
docker context create docker-volume-backup
docker buildx create docker-volume-backup --name docker-volume-backup --use
docker buildx inspect --bootstrap
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
-t offen/docker-volume-backup:$CIRCLE_TAG \
-t offen/docker-volume-backup:latest \
. --push
workflows:
version: 2
deploy:
docker_image:
jobs:
- canary:
filters:
tags:
ignore: /^v.*/
- build:
filters:
branches:
ignore: /.*/
tags:
only: /^v.*/

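The buildx sequence above can be reproduced locally when debugging the multi-arch build; a sketch under the assumption that buildx is available (the builder name is arbitrary, and `--push` is omitted so nothing is published):

```sh
# register QEMU handlers so arm images can be built on an amd64 host
docker run --rm --privileged linuxkit/binfmt:v0.8
docker buildx create --name multiarch --use
docker buildx inspect --bootstrap
# without --push the result only lands in the build cache
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
  -t offen/docker-volume-backup:local .
```
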
.dockerignore (new file)

@@ -0,0 +1 @@
test

Dockerfile

@@ -1,16 +1,20 @@
# Copyright 2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: MIT
# SPDX-License-Identifier: MPL-2.0
FROM alpine:3.13
FROM golang:1.17-alpine as builder
ARG MC_VERSION=RELEASE.2021-06-13T17-48-22Z
RUN go install -ldflags "-X github.com/minio/mc/cmd.ReleaseTag=$MC_VERSION" github.com/minio/mc@$MC_VERSION
FROM alpine:3.14
WORKDIR /root
RUN apk add --update ca-certificates docker openrc gnupg
RUN update-ca-certificates
RUN rc-update add docker boot
RUN wget https://dl.min.io/client/mc/release/linux-amd64/mc && \
chmod +x mc && \
mv mc /usr/bin/mc
COPY --from=builder /go/bin/mc /usr/bin/mc
RUN mc --version
COPY src/backup.sh src/entrypoint.sh /root/
RUN chmod +x backup.sh && mv backup.sh /usr/bin/backup \

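The `-ldflags "-X …ReleaseTag=…"` in the new builder stage is what stamps the proper version into `mc` at compile time (see commit fa356137e8). A quick way to verify the baked-in release tag after building, a sketch assuming the image was tagged `local`:

```sh
docker build -t offen/docker-volume-backup:local .
# bypass the normal entrypoint and ask the bundled MinIO client for its version
docker run --rm --entrypoint mc offen/docker-volume-backup:local --version
```
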
Makefile (deleted)

@@ -1,5 +0,0 @@
DOCKER_TAG ?= local
.PHONY: build
build:
@docker build -t offen/docker-volume-backup:$(DOCKER_TAG) .

README.md

@@ -1,8 +1,8 @@
# docker-volume-backup
Backup Docker volumes to any S3 compatible storage.
Backup Docker volumes locally or to any S3 compatible storage.
The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to any S3 compatible storage and rotates away old backups if configured.
The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to a local directory or any S3 compatible storage (or both) and rotates away old backups if configured.
## Configuration
@@ -15,7 +15,7 @@ Backup targets, schedule and retention are configured in environment variables:
# template expression.
BACKUP_CRON_EXPRESSION="0 2 * * *"
BACKUP_FILENAME="offen-db-%Y-%m-%dT%H-%M-%S.tar.gz"
BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.tar.gz"
########### BACKUP STORAGE
@@ -28,21 +28,83 @@ AWS_SECRET_ACCESS_KEY="<xxx>"
AWS_S3_BUCKET_NAME="<xxx>"
# This is the FQDN of your storage server, e.g. `storage.example.com`.
# Do not set this when working against AWS S3.
# Do not set this when working against AWS S3. If you need to set a
# specific protocol, you will need to use the option below.
# AWS_ENDPOINT="<xxx>"
# The protocol to be used when communicating with your storage server.
# Defaults to "https". You can set this to "http" when communicating with
# a different Docker container on the same host for example.
# AWS_ENDPOINT_PROTO="https"
# In addition to backing up you can also store backups locally. Pass in
# a local path to store your backups here if needed. You likely want to
# mount a local folder or Docker volume into that location when running
# the container. Local paths can also be subject to pruning of old
# backups as defined below.
# BACKUP_ARCHIVE="/archive"
########### BACKUP PRUNING
# **IMPORTANT, PLEASE READ THIS BEFORE USING THIS FEATURE**:
# The mechanism used for pruning backups is not very sophisticated
# and applies its rules to **all files in the target directory** by default,
# which means that if you are storing your backups next to other files,
# these might become subject to deletion too. When using this option
# make sure the backup files are stored in a directory used exclusively
# for storing them or to configure BACKUP_PRUNING_PREFIX to limit
# removal to certain files.
# Define this value to enable automatic pruning of old backups. The value
# declares the number of days for which a backup is kept.
# BACKUP_RETENTION_DAYS="7"
# In case the duration a backup takes fluctuates noticeably in your setup
# you can adjust this setting to make sure there are no race conditions
# between the backup finishing and the pruning not deleting backups that
# sit on the very edge of the time window. Set this value to a duration
# that is expected to be bigger than the maximum difference of backups.
# Valid values have a suffix of (s)econds, (m)inutes, (h)ours, or (d)ays.
# BACKUP_PRUNING_LEEWAY="10m"
# In case your target bucket or directory contains other files than the ones
# managed by this container, you can limit the scope of rotation by setting
# a prefix value. This would usually be the non-parametrized part of your
# BACKUP_FILENAME. E.g. if BACKUP_FILENAME is `db-backup-%Y-%m-%dT%H-%M-%S.tar.gz`,
# you can set BACKUP_PRUNING_PREFIX to `db-backup-` and make sure
# unrelated files are not affected.
# BACKUP_PRUNING_PREFIX="backup-"
########### BACKUP ENCRYPTION
# Backups can be encrypted using gpg in case a passphrase is given
# GPG_PASSPHRASE="<xxx>"
########### STOPPING CONTAINERS DURING BACKUP
# Containers can be stopped by applying a
# `docker-volume-backup.stop-during-backup` label. By default, all containers
# that are labeled with `true` will be stopped. If you need more fine grained
# control (e.g. when running multiple containers based on this image), you can
# override this default by specifying a different value here.
# BACKUP_STOP_CONTAINER_LABEL="service1"
########### MINIO CLIENT CONFIGURATION
# Pass these additional flags to all MinIO client `mc` invocations.
# This can be used for example to pass `--insecure` when using self
# signed certificates, or passing `--debug` to gain insights on
# unexpected behavior.
# MC_GLOBAL_OPTIONS="<xxx>"
```
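For restoring an encrypted backup, the `.gpg` file needs to be decrypted with the same passphrase before unpacking. A minimal sketch, assuming symmetric encryption and a `.gpg`-suffixed archive as above (GnuPG 2.1+ may additionally require `--pinentry-mode loopback`):

```sh
gpg --batch --passphrase "<xxx>" -o backup.tar.gz -d backup.tar.gz.gpg
tar -xzf backup.tar.gz
```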
## Example in a docker-compose setup
@@ -74,10 +136,42 @@ services:
# to stop the container
- /var/run/docker.sock:/var/run/docker.sock:ro
- data:/backup/my-app-backup:ro
# If you mount a local directory or volume to `/archive` a local
# copy of the backup will be stored there. You can override the
# location inside of the container by setting `BACKUP_ARCHIVE`
# - /path/to/local_backups:/archive
volumes:
data:
```
## Using with Docker Swarm
By default, Docker Swarm will restart stopped containers automatically, even when manually stopped. If you plan to have your containers / services stopped during backup, this means you need to apply the `on-failure` restart policy to your service's definitions. A restart policy of `always` is not compatible with this tool.
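A minimal sketch of such a service definition (the service name is illustrative; top-level `labels` set container labels when deploying a stack, which is what the backup filter matches on):

```yml
services:
  my-app:
    labels:
      - docker-volume-backup.stop-during-backup=true
    deploy:
      restart_policy:
        condition: on-failure
```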
---
When running in Swarm mode, it's also advised to set a hard memory limit on your service (~25MB should be enough in most cases, but if you backup large files above half a gigabyte or similar, you might have to raise this in case the backup exits with `Killed`):
```yml
services:
backup:
image: offen/docker-volume-backup:latest
deployment:
resources:
limits:
memory: 25M
```
## Manually triggering a backup
You can manually trigger a backup run outside of the defined cron schedule by executing the `backup` command inside the container:
```
docker exec <container_ref> backup
```
---
## Differences to `futurice/docker-volume-backup`
This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
@@ -86,3 +180,5 @@ This image is heavily inspired by the `futurice/docker-volume-backup`. We decide
- This image makes use of the MinIO client `mc` instead of the full blown AWS CLI for uploading backups.
- The original image proposed to handle backup rotation through AWS S3 lifecycle policies. This image adds the option to rotate old backups through the same script so this functionality can also be offered for non-AWS storage backends like MinIO.
- InfluxDB specific functionality was removed.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.

src/backup.sh

@@ -17,13 +17,12 @@ DOCKER_SOCK="/var/run/docker.sock"
if [ -S "$DOCKER_SOCK" ]; then
TEMPFILE="$(mktemp)"
docker ps \
--format "{{.ID}}" \
--filter "label=docker-volume-backup.stop-during-backup=true" \
docker ps -q \
--filter "label=docker-volume-backup.stop-during-backup=$BACKUP_STOP_CONTAINER_LABEL" \
> "$TEMPFILE"
CONTAINERS_TO_STOP="$(cat $TEMPFILE | tr '\n' ' ')"
CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)"
CONTAINERS_TOTAL="$(docker ps --format "{{.ID}}" | wc -l)"
CONTAINERS_TOTAL="$(docker ps -q | wc -l)"
rm "$TEMPFILE"
echo "$CONTAINERS_TOTAL containers running on host in total."
echo "$CONTAINERS_TO_STOP_TOTAL containers marked to be stopped during backup."
@@ -39,7 +38,7 @@ if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
fi
info "Creating backup"
BACKUP_FILENAME="$(date +"${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}")"
BACKUP_FILENAME="$(date +"$BACKUP_FILENAME")"
tar -czvf "$BACKUP_FILENAME" $BACKUP_SOURCES # allow the var to expand, in case we have multiple sources
if [ ! -z "$GPG_PASSPHRASE" ]; then
@@ -51,17 +50,51 @@ if [ ! -z "$GPG_PASSPHRASE" ]; then
fi
if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
info "Starting containers back up"
docker start $CONTAINERS_TO_STOP
info "Starting containers/services back up"
# The container might be part of a stack when running in swarm mode, so
# its parent service needs to be restarted instead once backup is finished.
SERVICES_REQUIRING_UPDATE=""
for CONTAINER_ID in $CONTAINERS_TO_STOP; do
SWARM_SERVICE_NAME=$(
docker inspect \
--format "{{ index .Config.Labels \"com.docker.swarm.service.name\" }}" \
$CONTAINER_ID
)
if [ -z "$SWARM_SERVICE_NAME" ]; then
echo "Restarting $(docker start $CONTAINER_ID)"
else
echo "Removing $(docker rm $CONTAINER_ID)"
# Multiple containers might belong to the same service, so they will
# be restarted only after all names are known.
SERVICES_REQUIRING_UPDATE="${SERVICES_REQUIRING_UPDATE} ${SWARM_SERVICE_NAME}"
fi
done
if [ -n "$SERVICES_REQUIRING_UPDATE" ]; then
for SERVICE_NAME in $(echo -n "$SERVICES_REQUIRING_UPDATE" | tr ' ' '\n' | sort -u); do
docker service update --force $SERVICE_NAME
done
fi
fi
copy_backup () {
mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "$1"
}
if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
info "Uploading backup to remote storage"
echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
mc cp "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
copy_backup "backup-target/$AWS_S3_BUCKET_NAME"
echo "Upload finished."
fi
if [ -d "$BACKUP_ARCHIVE" ]; then
info "Copying backup to local archive"
echo "Will copy to \"$BACKUP_ARCHIVE\"."
copy_backup "$BACKUP_ARCHIVE"
echo "Finished copying."
fi
if [ -f "$BACKUP_FILENAME" ]; then
info "Cleaning up"
rm -vf "$BACKUP_FILENAME"
@@ -70,18 +103,25 @@ fi
info "Backup finished"
echo "Will wait for next scheduled backup."
if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
info "Pruning old backups"
bucket=$AWS_S3_BUCKET_NAME
prune () {
target=$1
if [ ! -z "$BACKUP_PRUNING_PREFIX" ]; then
target="$target/${BACKUP_PRUNING_PREFIX}"
fi
rule_applies_to=$(mc rm --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l)
rule_applies_to=$(
mc rm $MC_GLOBAL_OPTIONS --fake --recursive --force \
--older-than "${BACKUP_RETENTION_DAYS}d" \
"$target" \
| wc -l
)
if [ "$rule_applies_to" == "0" ]; then
echo "No backups found older than the configured retention period of $BACKUP_RETENTION_DAYS days."
echo "Doing nothing."
exit 0
fi
total=$(mc ls "backup-target/$bucket" | wc -l)
total=$(mc ls $MC_GLOBAL_OPTIONS "$target" | wc -l)
if [ "$rule_applies_to" == "$total" ]; then
echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
@@ -89,6 +129,22 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
exit 1
fi
mc rm --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
mc rm $MC_GLOBAL_OPTIONS \
--recursive --force \
--older-than "${BACKUP_RETENTION_DAYS}d" "$target"
echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
}
if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
info "Pruning old backups"
echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
sleep "$BACKUP_PRUNING_LEEWAY"
if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
info "Pruning old backups from remote storage"
prune "backup-target/$bucket"
fi
if [ -d "$BACKUP_ARCHIVE" ]; then
info "Pruning old backups from local archive"
prune "$BACKUP_ARCHIVE"
fi
fi
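
The `--fake` flag used above makes `mc rm` only report what it would delete, which is how the script counts eligible files before pruning for real. The same dry run works by hand to preview a retention policy; a sketch assuming the `backup-target` alias, a bucket named `backup` and the `backup-` prefix from the README example:

```sh
mc rm --fake --recursive --force --older-than 7d "backup-target/backup/backup-"
```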

src/entrypoint.sh

@@ -12,19 +12,30 @@ set -e
cat <<EOF > env.sh
BACKUP_SOURCES="${BACKUP_SOURCES:-/backup}"
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
BACKUP_FILENAME=${BACKUP_FILENAME:-"backup-%Y-%m-%dT%H-%M-%S.tar.gz"}
BACKUP_FILENAME="${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}"
BACKUP_ARCHIVE="${BACKUP_ARCHIVE:-/archive}"
BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
BACKUP_PRUNING_PREFIX="${BACKUP_PRUNING_PREFIX:-}"
BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
AWS_ENDPOINT_PROTO="${AWS_ENDPOINT_PROTO:-https}"
GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
MC_GLOBAL_OPTIONS="${MC_GLOBAL_OPTIONS:-}"
EOF
chmod a+x env.sh
source env.sh
mc alias set backup-target "https://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
if [ ! -z "$AWS_ACCESS_KEY_ID" ] && [ ! -z "$AWS_SECRET_ACCESS_KEY" ]; then
mc $MC_GLOBAL_OPTIONS alias set backup-target \
"$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" \
"$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
fi
# Add our cron entry, and direct stdout & stderr to Docker commands stdout
echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."

test/cli/run.sh (new executable file)

@@ -0,0 +1,64 @@
#!/bin/sh
set -e
cd $(dirname $0)
docker network create test_network
docker volume create backup_data
docker volume create app_data
docker run -d \
--name minio \
--network test_network \
--env MINIO_ROOT_USER=test \
--env MINIO_ROOT_PASSWORD=test \
--env MINIO_ACCESS_KEY=test \
--env MINIO_SECRET_KEY=GMusLtUmILge2by+z890kQ \
-v backup_data:/data \
minio/minio:RELEASE.2020-08-04T23-10-51Z server /data
docker exec minio mkdir -p /data/backup
docker run -d \
--name offen \
--network test_network \
--label "docker-volume-backup.stop-during-backup=true" \
-v app_data:/var/opt/offen/ \
offen/offen:latest
sleep 10
docker run -d \
--name backup \
--network test_network \
-v app_data:/backup/app_data \
-v /var/run/docker.sock:/var/run/docker.sock \
--env AWS_ACCESS_KEY_ID=test \
--env AWS_SECRET_ACCESS_KEY=GMusLtUmILge2by+z890kQ \
--env AWS_ENDPOINT=minio:9000 \
--env AWS_ENDPOINT_PROTO=http \
--env AWS_S3_BUCKET_NAME=backup \
--env BACKUP_FILENAME=test.tar.gz \
--env BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?" \
offen/docker-volume-backup:$TEST_VERSION
docker exec backup backup
docker run --rm -it \
-v backup_data:/data alpine \
ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
echo "[TEST:PASS] Found relevant files in untared backup."
if [ "$(docker ps -q | wc -l)" != "3" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker ps
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
docker rm $(docker stop minio offen backup)
docker volume rm backup_data app_data
docker network rm test_network

test/compose/.gitignore (new file)

@@ -0,0 +1 @@
local

test/compose/docker-compose.yml (new file)

@@ -0,0 +1,43 @@
version: '3'
services:
minio:
image: minio/minio:RELEASE.2020-08-04T23-10-51Z
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: test
MINIO_ACCESS_KEY: test
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
volumes:
- backup_data:/data
backup: &default_backup_service
image: offen/docker-volume-backup:${TEST_VERSION}
depends_on:
- minio
restart: always
environment:
AWS_ACCESS_KEY_ID: test
AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
AWS_ENDPOINT: minio:9000
AWS_ENDPOINT_PROTO: http
AWS_S3_BUCKET_NAME: backup
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
backup_data:
app_data:

test/compose/run.sh (new executable file)

@@ -0,0 +1,32 @@
#!/bin/sh
set -e
cd $(dirname $0)
mkdir -p local
docker-compose up -d
sleep 5
docker-compose exec backup backup
docker run --rm -it \
-v compose_backup_data:/data alpine \
ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
echo "[TEST:PASS] Found relevant files in untared remote backup."
tar -xf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
echo "[TEST:PASS] Found relevant files in untared local backup."
if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker-compose ps
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
docker-compose down --volumes

test/swarm/docker-compose.yml (new file)

@@ -0,0 +1,63 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense
version: '3.8'
services:
minio:
image: minio/minio:RELEASE.2020-08-04T23-10-51Z
deploy:
restart_policy:
condition: on-failure
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: test
MINIO_ACCESS_KEY: test
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
volumes:
- backup_data:/data
backup: &default_backup_service
image: offen/docker-volume-backup:${TEST_VERSION}
depends_on:
- minio
deploy:
restart_policy:
condition: on-failure
environment:
AWS_ACCESS_KEY_ID: test
AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
AWS_ENDPOINT: minio:9000
AWS_ENDPOINT_PROTO: http
AWS_S3_BUCKET_NAME: backup
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
volumes:
- pg_data:/backup/pg_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
deploy:
replicas: 2
restart_policy:
condition: on-failure
pg:
image: postgres:12.2-alpine
environment:
POSTGRES_PASSWORD: example
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- pg_data:/var/lib/postgresql/data
deploy:
restart_policy:
condition: on-failure
volumes:
backup_data:
pg_data:

test/swarm/run.sh (new executable file)

@@ -0,0 +1,36 @@
#!/bin/sh
set -e
cd $(dirname $0)
docker swarm init
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z $(docker ps -q -f name=backup) ]; do
echo "[TEST:INFO] Backup container not ready yet. Retrying."
sleep 1
done
sleep 20
docker exec $(docker ps -q -f name=backup) backup
docker run --rm -it \
-v test_stack_backup_data:/data alpine \
ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
echo "[TEST:PASS] Found relevant files in untared backup."
if [ "$(docker ps -q | wc -l)" != "5" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker ps -a
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
docker stack rm test_stack
docker swarm leave --force

test/test.sh (new executable file)

@@ -0,0 +1,17 @@
#!/bin/sh
set -e
TEST_VERSION=${1:-canary}
for dir in $(ls -d -- */); do
test="${dir}run.sh"
echo "################################################"
echo "Now running $test"
echo "################################################"
echo ""
TEST_VERSION=$TEST_VERSION /bin/sh $test
echo ""
echo "$test passed"
echo ""
done