Mirror of https://github.com/offen/docker-volume-backup.git, synced 2025-12-05 17:18:02 +01:00

Compare commits (18 commits)
Commits in this comparison:

0782af88f4, f82577fcb5, 7f261a54b6, 0069faa7fd, 8c7ffc3d99, f6b40742b4, 767c21ef65, 4b59089e3d, 8e90ce408a, 510ae889e4, e4bb183afa, 5fd6f66324, da75d232f4, 8a385d22aa, a3d7af2b42, c01555f052, d29d0d7399, a91353742d
.circleci/config.yml

```diff
@@ -13,10 +13,9 @@ jobs:
           docker build . -t offen/docker-volume-backup:canary
       - run:
           name: Run tests
+          working_directory: ~/docker-volume-backup/test
           command: |
-            for test in test/**/test.sh; do
-              /bin/sh $test
-            done
+            ./test.sh canary
 
   build:
     docker:
```
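Taken together with the new `test/test.sh` runner further down, the CI step now delegates test discovery to the suite itself. Replicating the job locally should look roughly like this (paths as in the config above):

```sh
# Rough local equivalent of the CI job (illustrative):
docker build . -t offen/docker-volume-backup:canary
cd test
./test.sh canary
```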
Dockerfile

```diff
@@ -1,7 +1,7 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0
 
-FROM golang:1.16-alpine as builder
+FROM golang:1.17-alpine as builder
 ARG MC_VERSION=RELEASE.2021-06-13T17-48-22Z
 RUN go install -ldflags "-X github.com/minio/mc/cmd.ReleaseTag=$MC_VERSION" github.com/minio/mc@$MC_VERSION
 
```
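The `-X` linker flag stamps `MC_VERSION` into the `ReleaseTag` variable of the `github.com/minio/mc/cmd` package at build time, so the installed client reports the pinned release. A sketch of verifying this inside the builder stage, assuming the golang image's default `GOPATH` of `/go`:

```sh
# Sketch: confirm the pinned mc release inside the builder stage.
# /go/bin is where `go install` places binaries under the default GOPATH.
/go/bin/mc --version  # output should include RELEASE.2021-06-13T17-48-22Z
```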
README.md (68 lines changed)

````diff
@@ -1,8 +1,8 @@
 # docker-volume-backup
 
-Backup Docker volumes to any S3 compatible storage.
+Backup Docker volumes locally or to any S3 compatible storage.
 
-The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to any S3 compatible storage and rotates away old backups if configured.
+The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to a local directory or any S3 compatible storage (or both) and rotates away old backups if configured.
 
 ## Configuration
 
@@ -15,7 +15,7 @@ Backup targets, schedule and retention are configured in environment variables:
 # template expression.
 
 BACKUP_CRON_EXPRESSION="0 2 * * *"
-BACKUP_FILENAME="offen-db-%Y-%m-%dT%H-%M-%S.tar.gz"
+BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.tar.gz"
 
 ########### BACKUP STORAGE
 
@@ -28,11 +28,36 @@ AWS_SECRET_ACCESS_KEY="<xxx>"
 AWS_S3_BUCKET_NAME="<xxx>"
 
 # This is the FQDN of your storage server, e.g. `storage.example.com`.
-# Do not set this when working against AWS S3.
+# Do not set this when working against AWS S3. If you need to set a
+# specific protocol, you will need to use the option below.
 
 # AWS_ENDPOINT="<xxx>"
 
+# The protocol to be used when communicating with your storage server.
+# Defaults to "https". You can set this to "http" when communicating with
+# a different Docker container on the same host for example.
+
+# AWS_ENDPOINT_PROTO="https"
+
+# In addition to backing up you can also store backups locally. Pass in
+# a local path to store your backups here if needed. You likely want to
+# mount a local folder or Docker volume into that location when running
+# the container. Local paths can also be subject to pruning of old
+# backups as defined below.
+
+# BACKUP_ARCHIVE="/archive"
+
 ########### BACKUP PRUNING
 
+# **IMPORTANT, PLEASE READ THIS BEFORE USING THIS FEATURE**:
+# The mechanism used for pruning backups is not very sophisticated
+# and applies its rules to **all files in the target directory** by default,
+# which means that if you are storing your backups next to other files,
+# these might become subject to deletion too. When using this option,
+# make sure the backup files are stored in a directory used exclusively
+# for storing them, or configure BACKUP_PRUNING_PREFIX to limit
+# removal to certain files.
+
 # Define this value to enable automatic pruning of old backups. The value
 # declares the number of days for which a backup is kept.
 
@@ -47,6 +72,15 @@ AWS_S3_BUCKET_NAME="<xxx>"
 
 # BACKUP_PRUNING_LEEWAY="10m"
 
+# In case your target bucket or directory contains files other than the ones
+# managed by this container, you can limit the scope of rotation by setting
+# a prefix value. This would usually be the non-parametrized part of your
+# BACKUP_FILENAME. E.g. if BACKUP_FILENAME is `db-backup-%Y-%m-%dT%H-%M-%S.tar.gz`,
+# you can set BACKUP_PRUNING_PREFIX to `db-backup-` and make sure
+# unrelated files are not affected.
+
+# BACKUP_PRUNING_PREFIX="backup-"
+
 ########### BACKUP ENCRYPTION
 
 # Backups can be encrypted using gpg in case a passphrase is given
@@ -102,6 +136,10 @@ services:
       # to stop the container
       - /var/run/docker.sock:/var/run/docker.sock:ro
       - data:/backup/my-app-backup:ro
+      # If you mount a local directory or volume to `/archive` a local
+      # copy of the backup will be stored there. You can override the
+      # location inside of the container by setting `BACKUP_ARCHIVE`
+      # - /path/to/local_backups:/archive
 volumes:
   data:
 ```
@@ -112,6 +150,28 @@ By default, Docker Swarm will restart stopped containers automatically, even when
 
 ---
 
+When running in Swarm mode, it's also advised to set a hard memory limit on your service (~25MB should be enough in most cases, but if you back up large files above half a gigabyte or similar, you might have to raise this in case the backup exits with `Killed`):
+
+```yml
+services:
+  backup:
+    image: offen/docker-volume-backup:latest
+    deploy:
+      resources:
+        limits:
+          memory: 25M
+```
+
+## Manually triggering a backup
+
+You can manually trigger a backup run outside of the defined cron schedule by executing the `backup` command inside the container:
+
+```
+docker exec <container_ref> backup
+```
+
+---
+
 ## Differences to `futurice/docker-volume-backup`
 
 This image is heavily inspired by `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
````
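Combining the new options, a container that only archives locally (no S3 credentials at all) while pruning safely could be started roughly as follows; the host path `/mnt/backups` and volume name `data` are illustrative:

```sh
# Minimal sketch: local-only backups with pruning scoped by prefix.
docker run -d \
  --name backup \
  -v data:/backup/data:ro \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  -v /mnt/backups:/archive \
  --env BACKUP_FILENAME="db-backup-%Y-%m-%dT%H-%M-%S.tar.gz" \
  --env BACKUP_PRUNING_PREFIX="db-backup-" \
  --env BACKUP_RETENTION_DAYS="7" \
  offen/docker-volume-backup:latest
```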
src/backup.sh

```diff
@@ -77,13 +77,24 @@ if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
   fi
 fi
 
+copy_backup () {
+  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "$1"
+}
+
 if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
   info "Uploading backup to remote storage"
   echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
-  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
+  copy_backup "backup-target/$AWS_S3_BUCKET_NAME"
   echo "Upload finished."
 fi
 
+if [ -d "$BACKUP_ARCHIVE" ]; then
+  info "Copying backup to local archive"
+  echo "Will copy to \"$BACKUP_ARCHIVE\"."
+  copy_backup "$BACKUP_ARCHIVE"
+  echo "Finished copying."
+fi
+
 if [ -f "$BACKUP_FILENAME" ]; then
   info "Cleaning up"
   rm -vf "$BACKUP_FILENAME"
@@ -92,16 +103,16 @@ fi
 info "Backup finished"
 echo "Will wait for next scheduled backup."
 
-if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
-  info "Pruning old backups"
-  echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
-  sleep "$BACKUP_PRUNING_LEEWAY"
-  bucket=$AWS_S3_BUCKET_NAME
+prune () {
+  target=$1
+  if [ ! -z "$BACKUP_PRUNING_PREFIX" ]; then
+    target="$target/${BACKUP_PRUNING_PREFIX}"
+  fi
 
   rule_applies_to=$(
-    mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force \
+    mc rm $MC_GLOBAL_OPTIONS --fake --recursive --force \
       --older-than "${BACKUP_RETENTION_DAYS}d" \
-      "backup-target/$bucket" \
+      "$target" \
       | wc -l
   )
   if [ "$rule_applies_to" == "0" ]; then
@@ -110,7 +121,7 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
     exit 0
   fi
 
-  total=$(mc ls $MC_GLOBAL_OPTIONS "backup-target/$bucket" | wc -l)
+  total=$(mc ls $MC_GLOBAL_OPTIONS "$target" | wc -l)
 
   if [ "$rule_applies_to" == "$total" ]; then
     echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
@@ -119,7 +130,21 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
   fi
 
   mc rm $MC_GLOBAL_OPTIONS \
-    --recursive -force \
+    --recursive --force \
-    --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
+    --older-than "${BACKUP_RETENTION_DAYS}d" "$target"
   echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
+}
+
+if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
+  info "Pruning old backups"
+  echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
+  sleep "$BACKUP_PRUNING_LEEWAY"
+  if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
+    info "Pruning old backups from remote storage"
+    prune "backup-target/$AWS_S3_BUCKET_NAME"
+  fi
+  if [ -d "$BACKUP_ARCHIVE" ]; then
+    info "Pruning old backups from local archive"
+    prune "$BACKUP_ARCHIVE"
+  fi
 fi
```
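With these changes, `prune` narrows its scope by appending `BACKUP_PRUNING_PREFIX` to the target before handing it to `mc`. For example (values illustrative), calling `prune "backup-target/my-bucket"` with a prefix of `db-backup-` and a retention of 7 days boils down to roughly:

```sh
# Illustrative expansion of the prune function's final removal step:
mc rm --recursive --force \
  --older-than "7d" \
  "backup-target/my-bucket/db-backup-"
```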
src/entrypoint.sh

```diff
@@ -12,10 +12,13 @@ set -e
 cat <<EOF > env.sh
 BACKUP_SOURCES="${BACKUP_SOURCES:-/backup}"
 BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
-BACKUP_FILENAME=${BACKUP_FILENAME:-"backup-%Y-%m-%dT%H-%M-%S.tar.gz"}
+BACKUP_FILENAME="${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}"
+BACKUP_ARCHIVE="${BACKUP_ARCHIVE:-/archive}"
 
 BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
 BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
+BACKUP_PRUNING_PREFIX="${BACKUP_PRUNING_PREFIX:-}"
+BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
 
 AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
 AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
@@ -23,14 +26,16 @@ AWS_ENDPOINT_PROTO="${AWS_ENDPOINT_PROTO:-https}"
 
 GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
 
-BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
-
 MC_GLOBAL_OPTIONS="${MC_GLOBAL_OPTIONS:-}"
 EOF
 chmod a+x env.sh
 source env.sh
 
-mc $MC_GLOBAL_OPTIONS alias set backup-target "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+if [ ! -z "$AWS_ACCESS_KEY_ID" ] && [ ! -z "$AWS_SECRET_ACCESS_KEY" ]; then
+  mc $MC_GLOBAL_OPTIONS alias set backup-target \
+    "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" \
+    "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+fi
 
 # Add our cron entry, and direct stdout & stderr to Docker commands stdout
 echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
```
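Since the heredoc expands every `${VAR:-default}` once at container start and persists the result, the cron-triggered backup run sources the exact same configuration. With no overrides set, the generated `env.sh` would contain the defaults listed above:

```sh
# env.sh as generated with an empty environment
# (values derived from the defaults in the diff above):
BACKUP_SOURCES="/backup"
BACKUP_CRON_EXPRESSION="@daily"
BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.tar.gz"
BACKUP_ARCHIVE="/archive"
BACKUP_RETENTION_DAYS=""
BACKUP_PRUNING_LEEWAY="10m"
BACKUP_PRUNING_PREFIX=""
BACKUP_STOP_CONTAINER_LABEL="true"
AWS_S3_BUCKET_NAME=""
AWS_ENDPOINT="s3.amazonaws.com"
AWS_ENDPOINT_PROTO="https"
GPG_PASSPHRASE=""
MC_GLOBAL_OPTIONS=""
```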
test/cli/run.sh (new executable file, 64 lines)

```sh
#!/bin/sh

set -e

cd $(dirname $0)

docker network create test_network
docker volume create backup_data
docker volume create app_data

docker run -d \
  --name minio \
  --network test_network \
  --env MINIO_ROOT_USER=test \
  --env MINIO_ROOT_PASSWORD=test \
  --env MINIO_ACCESS_KEY=test \
  --env MINIO_SECRET_KEY=GMusLtUmILge2by+z890kQ \
  -v backup_data:/data \
  minio/minio:RELEASE.2020-08-04T23-10-51Z server /data

docker exec minio mkdir -p /data/backup

docker run -d \
  --name offen \
  --network test_network \
  --label "docker-volume-backup.stop-during-backup=true" \
  -v app_data:/var/opt/offen/ \
  offen/offen:latest

sleep 10

docker run -d \
  --name backup \
  --network test_network \
  -v app_data:/backup/app_data \
  -v /var/run/docker.sock:/var/run/docker.sock \
  --env AWS_ACCESS_KEY_ID=test \
  --env AWS_SECRET_ACCESS_KEY=GMusLtUmILge2by+z890kQ \
  --env AWS_ENDPOINT=minio:9000 \
  --env AWS_ENDPOINT_PROTO=http \
  --env AWS_S3_BUCKET_NAME=backup \
  --env BACKUP_FILENAME=test.tar.gz \
  --env BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?" \
  offen/docker-volume-backup:$TEST_VERSION

docker exec backup backup

docker run --rm -it \
  -v backup_data:/data alpine \
  ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'

echo "[TEST:PASS] Found relevant files in untared backup."

if [ "$(docker ps -q | wc -l)" != "3" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker ps
  exit 1
fi

echo "[TEST:PASS] All containers running post backup."

docker rm $(docker stop minio offen backup)
docker volume rm backup_data app_data
docker network rm test_network
```
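The script picks the image tag up from the `TEST_VERSION` environment variable (normally exported by `test/test.sh` below), so running this scenario in isolation looks like:

```sh
# Run only the CLI scenario against a locally built canary image:
cd test/cli
TEST_VERSION=canary ./run.sh
```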
test/compose/.gitignore (new file, 1 line)

```
local
```
test/compose/docker-compose.yml

```diff
@@ -13,7 +13,7 @@ services:
       - backup_data:/data
 
   backup: &default_backup_service
-    image: offen/docker-volume-backup:canary
+    image: offen/docker-volume-backup:${TEST_VERSION}
     depends_on:
       - minio
     restart: always
@@ -26,6 +26,7 @@ services:
       BACKUP_FILENAME: test.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
     volumes:
+      - ./local:/archive
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock
 
```
test/compose/run.sh (new executable file, 32 lines)

```sh
#!/bin/sh

set -e

cd $(dirname $0)

mkdir -p local

docker-compose up -d
sleep 5

docker-compose exec backup backup

docker run --rm -it \
  -v compose_backup_data:/data alpine \
  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'

echo "[TEST:PASS] Found relevant files in untared remote backup."

tar -xf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db

echo "[TEST:PASS] Found relevant files in untared local backup."

if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker-compose ps
  exit 1
fi

echo "[TEST:PASS] All containers running post backup."

docker-compose down --volumes
```
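Note the volume name in the verification step: docker-compose prefixes volume names with the project name, which defaults to the containing directory name (`compose` here), so `backup_data` from the compose file materializes as `compose_backup_data`:

```sh
# After `docker-compose up` in test/compose, the volume is namespaced
# with the project name, which is why the check mounts it as:
docker volume inspect compose_backup_data
```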
Deleted: previous test script (24 lines; formerly picked up by the old CI glob `test/**/test.sh`)

```diff
@@ -1,24 +0,0 @@
-#!/bin/sh
-
-set -e
-
-cd $(dirname $0)
-
-docker-compose up -d
-sleep 5
-
-docker-compose exec backup backup
-
-docker run --rm -it \
-  -v default_backup_data:/data alpine \
-  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
-
-if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
-  echo "Expected all containers to be running post backup, instead seen:"
-  docker-compose ps
-  exit 1
-fi
-
-docker-compose down --volumes
-
-echo "Test passed"
```
test/swarm/docker-compose.yml (new file, 63 lines)

```yml
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

version: '3.8'

services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      MINIO_ROOT_USER: test
      MINIO_ROOT_PASSWORD: test
      MINIO_ACCESS_KEY: test
      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
    volumes:
      - backup_data:/data

  backup: &default_backup_service
    image: offen/docker-volume-backup:${TEST_VERSION}
    depends_on:
      - minio
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
      AWS_ENDPOINT: minio:9000
      AWS_ENDPOINT_PROTO: http
      AWS_S3_BUCKET_NAME: backup
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
    volumes:
      - pg_data:/backup/pg_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    deploy:
      replicas: 2
      restart_policy:
        condition: on-failure

  pg:
    image: postgres:12.2-alpine
    environment:
      POSTGRES_PASSWORD: example
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - pg_data:/var/lib/postgresql/data
    deploy:
      restart_policy:
        condition: on-failure

volumes:
  backup_data:
  pg_data:
```
test/swarm/run.sh (new executable file, 36 lines)

```sh
#!/bin/sh

set -e

cd $(dirname $0)

docker swarm init

docker stack deploy --compose-file=docker-compose.yml test_stack

while [ -z $(docker ps -q -f name=backup) ]; do
  echo "[TEST:INFO] Backup container not ready yet. Retrying."
  sleep 1
done

sleep 20

docker exec $(docker ps -q -f name=backup) backup

docker run --rm -it \
  -v test_stack_backup_data:/data alpine \
  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'

echo "[TEST:PASS] Found relevant files in untared backup."

if [ "$(docker ps -q | wc -l)" != "5" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker ps -a
  exit 1
fi

echo "[TEST:PASS] All containers running post backup."

docker stack rm test_stack

docker swarm leave --force
```
test/test.sh (new executable file, 17 lines)

```sh
#!/bin/sh

set -e

TEST_VERSION=${1:-canary}

for dir in $(ls -d -- */); do
  test="${dir}run.sh"
  echo "################################################"
  echo "Now running $test"
  echo "################################################"
  echo ""
  TEST_VERSION=$TEST_VERSION /bin/sh $test
  echo ""
  echo "$test passed"
  echo ""
done
```
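The runner iterates over every scenario directory (`cli`, `compose` and `swarm` after this change), exporting the tag under test to each `run.sh`; the first positional argument selects that tag and defaults to `canary`, matching what CI builds:

```sh
# Run the whole suite against the canary image (the default):
cd test && ./test.sh

# Or test a specific image tag (tag illustrative):
./test.sh v1.0.0
```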