Mirror of https://github.com/offen/docker-volume-backup.git, synced 2025-12-05 17:18:02 +01:00

Compare commits (29 commits)
Commits in this comparison (SHA1 only; author and date were not captured):

- 8c7ffc3d99
- f6b40742b4
- 767c21ef65
- 4b59089e3d
- 8e90ce408a
- 510ae889e4
- e4bb183afa
- 5fd6f66324
- da75d232f4
- 8a385d22aa
- a3d7af2b42
- c01555f052
- d29d0d7399
- a91353742d
- dda71c3a5d
- a8f013e16a
- 44d65c1a67
- 88d4326e61
- 6d3e43680c
- 6ce197696a
- ac3a231d2b
- 054ab8fbe6
- fa356137e8
- 07befda44d
- c33ebc0c70
- 23c287bfc7
- 5be3c36040
- 57afad5727
- bafca7bb85
.circleci/config.yml

@@ -1,40 +1,61 @@
version: 2.1

jobs:
  build:
    docker:
      - image: cimg/base:2020.06
  canary:
    machine:
      image: ubuntu-1604:202007-01
    working_directory: ~/docker-volume-backup
    steps:
      - checkout
      - setup_remote_docker
      - run:
          name: Build
          command: make build
      - run:
          name: Check if image needs to be pushed
          command: |
            if [[ -z "$CIRCLE_TAG" ]]; then
              echo "Not a git tag, nothing to do ..."
              circleci-agent step halt
            fi
            docker build . -t offen/docker-volume-backup:canary
      - run:
          name: Run tests
          working_directory: ~/docker-volume-backup/test
          command: |
            ./test.sh canary

  build:
    docker:
      - image: cimg/base:2020.06
    environment:
      DOCKER_BUILDKIT: '1'
      DOCKER_CLI_EXPERIMENTAL: enabled
    working_directory: ~/docker-volume-backup
    steps:
      - checkout
      - setup_remote_docker:
          version: 20.10.6
      - docker/install-docker-credential-helper
      - docker/configure-docker-credentials-store
      - run:
          name: Push to Docker Hub
          command: |
            echo "$DOCKER_ACCESSTOKEN" | docker login --username offen --password-stdin
            docker tag offen/docker-volume-backup:local offen/docker-volume-backup:$CIRCLE_TAG
            docker tag offen/docker-volume-backup:local offen/docker-volume-backup:latest
            docker push offen/docker-volume-backup:$CIRCLE_TAG
            docker push offen/docker-volume-backup:latest
            # This is required for building ARM: https://gitlab.alpinelinux.org/alpine/aports/-/issues/12406
            docker run --rm --privileged linuxkit/binfmt:v0.8
            docker context create docker-volume-backup
            docker buildx create docker-volume-backup --name docker-volume-backup --use
            docker buildx inspect --bootstrap
            docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
              -t offen/docker-volume-backup:$CIRCLE_TAG \
              -t offen/docker-volume-backup:latest \
              . --push

workflows:
  version: 2
  deploy:
  docker_image:
    jobs:
      - canary:
          filters:
            tags:
              ignore: /^v.*/
      - build:
          filters:
            branches:
              ignore: /.*/
            tags:
              only: /^v.*/
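The workflow filters above route pushes to the two jobs: branch pushes (and non-version tags) run the `canary` build-and-test job, while version tags run the `build` job that pushes the multi-platform image to Docker Hub. A sketch with plain git, where the remote name and version number are placeholders:

```sh
# A branch push carries no tag, so only the canary job (build + test) runs.
git push origin main

# A tag matching /^v.*/ is ignored by canary and triggers the build job,
# which pushes the multi-platform image to Docker Hub.
git tag v1.0.0
git push origin v1.0.0
```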
.dockerignore (new file, 1 line)

@@ -0,0 +1 @@
test
Dockerfile (14 changed lines)

@@ -1,16 +1,20 @@
# Copyright 2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: MIT
# SPDX-License-Identifier: MPL-2.0

FROM alpine:3.13
FROM golang:1.17-alpine as builder
ARG MC_VERSION=RELEASE.2021-06-13T17-48-22Z
RUN go install -ldflags "-X github.com/minio/mc/cmd.ReleaseTag=$MC_VERSION" github.com/minio/mc@$MC_VERSION

FROM alpine:3.14

WORKDIR /root

RUN apk add --update ca-certificates docker openrc gnupg
RUN update-ca-certificates
RUN rc-update add docker boot

RUN wget https://dl.min.io/client/mc/release/linux-amd64/mc && \
    chmod +x mc && \
    mv mc /usr/bin/mc
COPY --from=builder /go/bin/mc /usr/bin/mc
RUN mc --version

COPY src/backup.sh src/entrypoint.sh /root/
RUN chmod +x backup.sh && mv backup.sh /usr/bin/backup \
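The Dockerfile now compiles the MinIO client in a Go builder stage and copies the binary into the runtime image instead of downloading a release binary. A quick local sanity check is sketched below; the `local` tag is an arbitrary choice and the `--entrypoint` override is only for illustration (the Dockerfile itself already runs `mc --version` as a build step):

```sh
# Build the image locally under an arbitrary tag
docker build -t offen/docker-volume-backup:local .

# Confirm the bundled mc binary reports the pinned release tag
docker run --rm --entrypoint mc offen/docker-volume-backup:local --version
```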
Makefile (5 changed lines)

@@ -1,5 +0,0 @@
DOCKER_TAG ?= local

.PHONY: build
build:
	@docker build -t offen/docker-volume-backup:$(DOCKER_TAG) .
README.md (76 changed lines)

@@ -1,8 +1,8 @@
# docker-volume-backup

Backup Docker volumes to any S3 compatible storage.
Backup Docker volumes locally or to any S3 compatible storage.

The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to any S3 compatible storage and rotates away old backups if configured.
The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to a local directory or any S3 compatible storage (or both) and rotates away old backups if configured.

## Configuration

@@ -15,7 +15,7 @@ Backup targets, schedule and retention are configured in environment variables:
# template expression.

BACKUP_CRON_EXPRESSION="0 2 * * *"
BACKUP_FILENAME="offen-db-%Y-%m-%dT%H-%M-%S.tar.gz"
BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.tar.gz"

########### BACKUP STORAGE

@@ -28,11 +28,35 @@ AWS_SECRET_ACCESS_KEY="<xxx>"
AWS_S3_BUCKET_NAME="<xxx>"

# This is the FQDN of your storage server, e.g. `storage.example.com`.
# Do not set this when working against AWS S3.
# Do not set this when working against AWS S3. If you need to set a
# specific protocol, you will need to use the option below.

# AWS_ENDPOINT="<xxx>"

# The protocol to be used when communicating with your storage server.
# Defaults to "https". You can set this to "http" when communicating with
# a different Docker container on the same host for example.

# AWS_ENDPOINT_PROTO="https"

# In addition to backing up you can also store backups locally. Pass in
# a local path to store your backups here if needed. You likely want to
# mount a local folder or Docker volume into that location when running
# the container. Local paths can also be subject to pruning of old
# backups as defined below.

# BACKUP_ARCHIVE="/archive"

########### BACKUP PRUNING

# **IMPORTANT, PLEASE READ THIS BEFORE USING THIS FEATURE**:
# The mechanism used for pruning backups is not very sophisticated
# and applies its rules to **all files in the target directory**,
# which means that if you are storing your backups next to other files,
# these might become subject to deletion too. When using this option
# make sure the backup files are stored in a directory used exclusively
# for storing them or you might lose data.

# Define this value to enable automatic pruning of old backups. The value
# declares the number of days for which a backup is kept.

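The hunk above documents the new `AWS_ENDPOINT_PROTO`, `BACKUP_ARCHIVE` and retention options. A sketch of how they combine, closely mirroring the `test/cli/run.sh` script added further down in this diff; network, volume, credential and path values are placeholders:

```sh
# Back up to a MinIO container on the same host over plain HTTP and
# additionally keep a local copy in the mounted /archive directory
# (/archive is the default archive location inside the container).
docker run -d \
  --name backup \
  --network my_network \
  -v app_data:/backup/app_data:ro \
  -v /path/to/local_backups:/archive \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  --env AWS_ENDPOINT=minio:9000 \
  --env AWS_ENDPOINT_PROTO=http \
  --env AWS_ACCESS_KEY_ID=test \
  --env AWS_SECRET_ACCESS_KEY=test \
  --env AWS_S3_BUCKET_NAME=backup \
  --env BACKUP_RETENTION_DAYS=7 \
  offen/docker-volume-backup:latest
```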
@@ -53,6 +77,16 @@ AWS_S3_BUCKET_NAME="<xxx>"

# GPG_PASSPHRASE="<xxx>"

########### STOPPING CONTAINERS DURING BACKUP

# Containers can be stopped by applying a
# `docker-volume-backup.stop-during-backup` label. By default, all containers
# that are labeled with `true` will be stopped. If you need more fine grained
# control (e.g. when running multiple containers based on this image), you can
# override this default by specifying a different value here.

# BACKUP_STOP_CONTAINER_LABEL="service1"

########### MINIO CLIENT CONFIGURATION

# Pass these additional flags to all MinIO client `mc` invocations.
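As a sketch of the fine-grained stop-label control documented in the hunk above (image and volume names are hypothetical): a backup container configured with `BACKUP_STOP_CONTAINER_LABEL=service1` only stops containers carrying that label value, so containers labeled `true` for a different backup container keep running.

```sh
# Label the application container with a custom value instead of "true"
docker run -d \
  --label docker-volume-backup.stop-during-backup=service1 \
  -v service1_data:/var/lib/service1 \
  my-org/service1:latest

# This backup container only stops containers carrying that label value
docker run -d \
  -v service1_data:/backup/service1_data:ro \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  --env BACKUP_STOP_CONTAINER_LABEL="service1" \
  offen/docker-volume-backup:latest
```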
@@ -92,10 +126,42 @@ services:
      # to stop the container
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - data:/backup/my-app-backup:ro
      # If you mount a local directory or volume to `/archive` a local
      # copy of the backup will be stored there. You can override the
      # location inside of the container by setting `BACKUP_ARCHIVE`
      # - /path/to/local_backups:/archive
volumes:
  data:
```

## Using with Docker Swarm

By default, Docker Swarm will restart stopped containers automatically, even when manually stopped. If you plan to have your containers / services stopped during backup, this means you need to apply the `on-failure` restart policy to your service's definitions. A restart policy of `always` is not compatible with this tool.

---

When running in Swarm mode, it's also advised to set a hard memory limit on your service (~25MB should be enough in most cases, but if you backup large files above half a gigabyte or similar, you might have to raise this in case the backup exits with `Killed`):

```yml
services:
  backup:
    image: offen/docker-volume-backup:latest
    deployment:
      resources:
        limits:
          memory: 25M
```

## Manually triggering a backup

You can manually trigger a backup run outside of the defined cron schedule by executing the `backup` command inside the container:

```
docker exec <container_ref> backup
```

---

## Differences to `futurice/docker-volume-backup`

This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:

@@ -104,3 +170,5 @@ This image is heavily inspired by the `futurice/docker-volume-backup`.
- This image makes use of the MinIO client `mc` instead of the full blown AWS CLI for uploading backups.
- The original image proposed to handle backup rotation through AWS S3 lifecycle policies. This image adds the option to rotate old backups through the same script so this functionality can also be offered for non-AWS storage backends like MinIO.
- InfluxDB specific functionality was removed.
- `arm64` and `arm/v7` architectures are supported.
- Docker in Swarm mode is supported.
src/backup.sh

@@ -17,13 +17,12 @@ DOCKER_SOCK="/var/run/docker.sock"

if [ -S "$DOCKER_SOCK" ]; then
  TEMPFILE="$(mktemp)"
  docker ps \
    --format "{{.ID}}" \
    --filter "label=docker-volume-backup.stop-during-backup=true" \
  docker ps -q \
    --filter "label=docker-volume-backup.stop-during-backup=$BACKUP_STOP_CONTAINER_LABEL" \
    > "$TEMPFILE"
  CONTAINERS_TO_STOP="$(cat $TEMPFILE | tr '\n' ' ')"
  CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)"
  CONTAINERS_TOTAL="$(docker ps --format "{{.ID}}" | wc -l)"
  CONTAINERS_TOTAL="$(docker ps -q | wc -l)"
  rm "$TEMPFILE"
  echo "$CONTAINERS_TOTAL containers running on host in total."
  echo "$CONTAINERS_TO_STOP_TOTAL containers marked to be stopped during backup."

@@ -39,7 +38,7 @@ if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
fi

info "Creating backup"
BACKUP_FILENAME="$(date +"${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}")"
BACKUP_FILENAME="$(date +"$BACKUP_FILENAME")"
tar -czvf "$BACKUP_FILENAME" $BACKUP_SOURCES # allow the var to expand, in case we have multiple sources

if [ ! -z "$GPG_PASSPHRASE" ]; then

@@ -51,17 +50,51 @@ if [ ! -z "$GPG_PASSPHRASE" ]; then
fi

if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
  info "Starting containers back up"
  docker start $CONTAINERS_TO_STOP
  info "Starting containers/services back up"
  # The container might be part of a stack when running in swarm mode, so
  # its parent service needs to be restarted instead once backup is finished.
  SERVICES_REQUIRING_UPDATE=""
  for CONTAINER_ID in $CONTAINERS_TO_STOP; do
    SWARM_SERVICE_NAME=$(
      docker inspect \
        --format "{{ index .Config.Labels \"com.docker.swarm.service.name\" }}" \
        $CONTAINER_ID
    )
    if [ -z "$SWARM_SERVICE_NAME" ]; then
      echo "Restarting $(docker start $CONTAINER_ID)"
    else
      echo "Removing $(docker rm $CONTAINER_ID)"
      # Multiple containers might belong to the same service, so they will
      # be restarted only after all names are known.
      SERVICES_REQUIRING_UPDATE="${SERVICES_REQUIRING_UPDATE} ${SWARM_SERVICE_NAME}"
    fi
  done

  if [ -n "$SERVICES_REQUIRING_UPDATE" ]; then
    for SERVICE_NAME in $(echo -n "$SERVICES_REQUIRING_UPDATE" | tr ' ' '\n' | sort -u); do
      docker service update --force $SERVICE_NAME
    done
  fi
fi

copy_backup () {
  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "$1"
}

if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
  info "Uploading backup to remote storage"
  echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
  copy_backup "backup-target/$AWS_S3_BUCKET_NAME"
  echo "Upload finished."
fi

if [ -d "$BACKUP_ARCHIVE" ]; then
  info "Copying backup to local archive"
  echo "Will copy to \"$BACKUP_ARCHIVE\"."
  copy_backup "$BACKUP_ARCHIVE"
  echo "Finished copying."
fi

if [ -f "$BACKUP_FILENAME" ]; then
  info "Cleaning up"
  rm -vf "$BACKUP_FILENAME"

@@ -70,20 +103,21 @@ fi
info "Backup finished"
echo "Will wait for next scheduled backup."

if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
  info "Pruning old backups"
  echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
  sleep "$BACKUP_PRUNING_LEEWAY"
  bucket=$AWS_S3_BUCKET_NAME

  rule_applies_to=$(mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l)
prune () {
  target=$1
  rule_applies_to=$(
    mc rm $MC_GLOBAL_OPTIONS --fake --recursive --force \
      --older-than "${BACKUP_RETENTION_DAYS}d" \
      "$target" \
      | wc -l
  )
  if [ "$rule_applies_to" == "0" ]; then
    echo "No backups found older than the configured retention period of $BACKUP_RETENTION_DAYS days."
    echo "Doing nothing."
    exit 0
  fi

  total=$(mc ls $MC_GLOBAL_OPTIONS "backup-target/$bucket" | wc -l)
  total=$(mc ls $MC_GLOBAL_OPTIONS "$target" | wc -l)

  if [ "$rule_applies_to" == "$total" ]; then
    echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."

@@ -91,6 +125,22 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
    exit 1
  fi

  mc rm $MC_GLOBAL_OPTIONS --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
  mc rm $MC_GLOBAL_OPTIONS \
    --recursive --force \
    --older-than "${BACKUP_RETENTION_DAYS}d" "$target"
  echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
}

if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
  info "Pruning old backups"
  echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
  sleep "$BACKUP_PRUNING_LEEWAY"
  if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
    info "Pruning old backups from remote storage"
    prune "backup-target/$bucket"
  fi
  if [ -d "$BACKUP_ARCHIVE" ]; then
    info "Pruning old backups from local archive"
    prune "$BACKUP_ARCHIVE"
  fi
fi
src/entrypoint.sh

@@ -12,13 +12,16 @@ set -e
cat <<EOF > env.sh
BACKUP_SOURCES="${BACKUP_SOURCES:-/backup}"
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
BACKUP_FILENAME=${BACKUP_FILENAME:-"backup-%Y-%m-%dT%H-%M-%S.tar.gz"}
BACKUP_FILENAME="${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}"
BACKUP_ARCHIVE="${BACKUP_ARCHIVE:-/archive}"

BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"

AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
AWS_ENDPOINT_PROTO="${AWS_ENDPOINT_PROTO:-https}"

GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"

@@ -27,7 +30,11 @@ EOF
chmod a+x env.sh
source env.sh

mc $MC_GLOBAL_OPTIONS alias set backup-target "https://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
if [ ! -z "$AWS_ACCESS_KEY_ID" ] && [ ! -z "$AWS_SECRET_ACCESS_KEY" ]; then
  mc $MC_GLOBAL_OPTIONS alias set backup-target \
    "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" \
    "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
fi

# Add our cron entry, and direct stdout & stderr to Docker commands stdout
echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
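With the guard above, the `mc` alias is only configured when S3 credentials are present, which is what makes purely local archiving possible. A sketch of a local-only setup under that assumption; volume and path names are placeholders:

```sh
# No AWS_* variables are set, so the entrypoint skips `mc alias set` and
# backup.sh only copies the archive into the mounted /archive directory.
docker run -d \
  --name backup \
  -v app_data:/backup/app_data:ro \
  -v /path/to/local_backups:/archive \
  offen/docker-volume-backup:latest
```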
test/cli/run.sh (new executable file, 64 lines)

@@ -0,0 +1,64 @@
#!/bin/sh

set -e

cd $(dirname $0)

docker network create test_network
docker volume create backup_data
docker volume create app_data

docker run -d \
  --name minio \
  --network test_network \
  --env MINIO_ROOT_USER=test \
  --env MINIO_ROOT_PASSWORD=test \
  --env MINIO_ACCESS_KEY=test \
  --env MINIO_SECRET_KEY=GMusLtUmILge2by+z890kQ \
  -v backup_data:/data \
  minio/minio:RELEASE.2020-08-04T23-10-51Z server /data

docker exec minio mkdir -p /data/backup

docker run -d \
  --name offen \
  --network test_network \
  --label "docker-volume-backup.stop-during-backup=true" \
  -v app_data:/var/opt/offen/ \
  offen/offen:latest

sleep 10

docker run -d \
  --name backup \
  --network test_network \
  -v app_data:/backup/app_data \
  -v /var/run/docker.sock:/var/run/docker.sock \
  --env AWS_ACCESS_KEY_ID=test \
  --env AWS_SECRET_ACCESS_KEY=GMusLtUmILge2by+z890kQ \
  --env AWS_ENDPOINT=minio:9000 \
  --env AWS_ENDPOINT_PROTO=http \
  --env AWS_S3_BUCKET_NAME=backup \
  --env BACKUP_FILENAME=test.tar.gz \
  --env BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?" \
  offen/docker-volume-backup:$TEST_VERSION

docker exec backup backup

docker run --rm -it \
  -v backup_data:/data alpine \
  ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'

echo "[TEST:PASS] Found relevant files in untared backup."

if [ "$(docker ps -q | wc -l)" != "3" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker ps
  exit 1
fi

echo "[TEST:PASS] All containers running post backup."

docker rm $(docker stop minio offen backup)
docker volume rm backup_data app_data
docker network rm test_network
test/compose/.gitignore (new file, 1 line)

@@ -0,0 +1 @@
local
test/compose/docker-compose.yml (new file, 43 lines)

@@ -0,0 +1,43 @@
version: '3'

services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
    environment:
      MINIO_ROOT_USER: test
      MINIO_ROOT_PASSWORD: test
      MINIO_ACCESS_KEY: test
      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
    volumes:
      - backup_data:/data

  backup: &default_backup_service
    image: offen/docker-volume-backup:${TEST_VERSION}
    depends_on:
      - minio
    restart: always
    environment:
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
      AWS_ENDPOINT: minio:9000
      AWS_ENDPOINT_PROTO: http
      AWS_S3_BUCKET_NAME: backup
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
    volumes:
      - ./local:/archive
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  backup_data:
  app_data:
test/compose/run.sh (new executable file, 32 lines)

@@ -0,0 +1,32 @@
#!/bin/sh

set -e

cd $(dirname $0)

mkdir -p local

docker-compose up -d
sleep 5

docker-compose exec backup backup

docker run --rm -it \
  -v compose_backup_data:/data alpine \
  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'

echo "[TEST:PASS] Found relevant files in untared remote backup."

tar -xf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db

echo "[TEST:PASS] Found relevant files in untared local backup."

if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker-compose ps
  exit 1
fi

echo "[TEST:PASS] All containers running post backup."

docker-compose down --volumes
test/swarm/docker-compose.yml (new file, 63 lines)

@@ -0,0 +1,63 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

version: '3.8'

services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      MINIO_ROOT_USER: test
      MINIO_ROOT_PASSWORD: test
      MINIO_ACCESS_KEY: test
      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
    volumes:
      - backup_data:/data

  backup: &default_backup_service
    image: offen/docker-volume-backup:${TEST_VERSION}
    depends_on:
      - minio
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
      AWS_ENDPOINT: minio:9000
      AWS_ENDPOINT_PROTO: http
      AWS_S3_BUCKET_NAME: backup
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
    volumes:
      - pg_data:/backup/pg_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    deploy:
      replicas: 2
      restart_policy:
        condition: on-failure

  pg:
    image: postgres:12.2-alpine
    environment:
      POSTGRES_PASSWORD: example
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - pg_data:/var/lib/postgresql/data
    deploy:
      restart_policy:
        condition: on-failure

volumes:
  backup_data:
  pg_data:
test/swarm/run.sh (new executable file, 36 lines)

@@ -0,0 +1,36 @@
#!/bin/sh

set -e

cd $(dirname $0)

docker swarm init

docker stack deploy --compose-file=docker-compose.yml test_stack

while [ -z $(docker ps -q -f name=backup) ]; do
  echo "[TEST:INFO] Backup container not ready yet. Retrying."
  sleep 1
done

sleep 20

docker exec $(docker ps -q -f name=backup) backup

docker run --rm -it \
  -v test_stack_backup_data:/data alpine \
  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'

echo "[TEST:PASS] Found relevant files in untared backup."

if [ "$(docker ps -q | wc -l)" != "5" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker ps -a
  exit 1
fi

echo "[TEST:PASS] All containers running post backup."

docker stack rm test_stack

docker swarm leave --force
test/test.sh (new executable file, 17 lines)

@@ -0,0 +1,17 @@
#!/bin/sh

set -e

TEST_VERSION=${1:-canary}

for dir in $(ls -d -- */); do
  test="${dir}run.sh"
  echo "################################################"
  echo "Now running $test"
  echo "################################################"
  echo ""
  TEST_VERSION=$TEST_VERSION /bin/sh $test
  echo ""
  echo "$test passed"
  echo ""
done
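The runner above executes every `*/run.sh` it finds, passing the image tag through `TEST_VERSION` (defaulting to `canary`). A sketch of running the suite against a locally built image; the tag name is arbitrary, `docker-compose` must be installed, and note that `test/swarm/run.sh` will initialize and later leave a Docker swarm on the host:

```sh
# Build an image under an arbitrary tag, then run the whole suite against it
docker build -t offen/docker-volume-backup:local .
cd test
./test.sh local
```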