Compare commits

...

10 Commits

Author | SHA1 | Message | Date
Frederik Ring | dda71c3a5d | fix distinct service names ending up in joint service token | 2021-07-10 08:09:55 +02:00
Frederik Ring | a8f013e16a | Merge pull request #8 from offen/some-tests: Add integration test | 2021-07-09 10:00:51 +02:00
Frederik Ring | 44d65c1a67 | assert that all containers are back up and running after backup | 2021-07-09 09:57:05 +02:00
Frederik Ring | 88d4326e61 | define cron schedule that never runs | 2021-07-09 09:50:59 +02:00
Frederik Ring | 6d3e43680c | add basic integration test | 2021-07-09 09:47:07 +02:00
Frederik Ring | 6ce197696a | scaffold basic testing in CI | 2021-07-09 08:15:27 +02:00
Frederik Ring | ac3a231d2b | Merge pull request #7 from offen/swarm-support: Support Docker in Swarm mode | 2021-07-08 20:07:06 +02:00
Frederik Ring | 054ab8fbe6 | when stopped container was part of a stack service, update service instead | 2021-07-08 19:54:04 +02:00
Frederik Ring | fa356137e8 | inject proper version for mc command at compile time | 2021-07-03 10:25:26 +02:00
Frederik Ring | 07befda44d | build mc from source, support arm/v7 | 2021-07-01 15:16:39 +02:00
8 changed files with 128 additions and 30 deletions

.circleci/config.yml

@@ -2,29 +2,21 @@ version: 2.1
 jobs:
   canary:
-    docker:
-      - image: cimg/base:2020.06
+    machine:
+      image: ubuntu-1604:202007-01
     working_directory: ~/docker-volume-backup
     steps:
       - checkout
-      - setup_remote_docker:
-          version: 20.10.6
       - run:
           name: Build
           command: |
             docker build . -t offen/docker-volume-backup:canary
       - run:
-          name: Create container from image
+          name: Run tests
           command: |
-            docker run -d offen/docker-volume-backup:canary
-            echo "Sleeping for 30s before checking if container is still running."
-            sleep 30
-            count=$(docker ps -q | wc -l)
-            if [[ $count != "1" ]]; then
-              echo "Expected one container to be running, found $count."
-              exit 1
-            fi
-            docker stop $(docker ps -q)
+            for test in test/**/test.sh; do
+              /bin/sh $test
+            done
   build:
     docker:
@@ -48,7 +40,7 @@ jobs:
             docker context create docker-volume-backup
             docker buildx create docker-volume-backup --name docker-volume-backup --use
             docker buildx inspect --bootstrap
-            docker buildx build --platform linux/arm64,linux/amd64 \
+            docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
               -t offen/docker-volume-backup:$CIRCLE_TAG \
               -t offen/docker-volume-backup:latest \
               . --push

.dockerignore (new file)

@@ -0,0 +1 @@
+test

Dockerfile

@@ -1,5 +1,9 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
-# SPDX-License-Identifier: MIT
+# SPDX-License-Identifier: MPL-2.0
+
+FROM golang:1.16-alpine as builder
+ARG MC_VERSION=RELEASE.2021-06-13T17-48-22Z
+RUN go install -ldflags "-X github.com/minio/mc/cmd.ReleaseTag=$MC_VERSION" github.com/minio/mc@$MC_VERSION
 FROM alpine:3.14
@@ -9,10 +13,8 @@ RUN apk add --update ca-certificates docker openrc gnupg
 RUN update-ca-certificates
 RUN rc-update add docker boot
-ARG TARGETARCH=amd64
-RUN wget https://dl.min.io/client/mc/release/linux-$TARGETARCH/mc && \
-  chmod +x mc && \
-  mv mc /usr/bin/mc
+COPY --from=builder /go/bin/mc /usr/bin/mc
+RUN mc --version
 COPY src/backup.sh src/entrypoint.sh /root/
 RUN chmod +x backup.sh && mv backup.sh /usr/bin/backup \

README.md

@@ -106,6 +106,12 @@ volumes:
   data:
 ```
+## Using with Docker Swarm
+By default, Docker Swarm will restart stopped containers automatically, even when manually stopped. If you plan to have your containers / services stopped during backup, this means you need to apply the `on-failure` restart policy to your service's definitions. A restart policy of `always` is not compatible with this tool.
 ---
 ## Differences to `futurice/docker-volume-backup`
 This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
@@ -114,4 +120,5 @@ This image is heavily inspired by the `futurice/docker-volume-backup`. We decide
 - This image makes use of the MinIO client `mc` instead of the full blown AWS CLI for uploading backups.
 - The original image proposed to handle backup rotation through AWS S3 lifecycle policies. This image adds the option to rotate old backups through the same script so this functionality can also be offered for non-AWS storage backends like MinIO.
 - InfluxDB specific functionality was removed.
-- `arm64` Architecture is supported.
+- `arm64` and `arm/v7` architectures are supported.
+- Docker in Swarm mode is supported.
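
To illustrate the "Using with Docker Swarm" note added above: a minimal, hypothetical stack snippet using the `on-failure` restart policy (the `app` service name and `app_data` volume here are placeholders, not part of this changeset):

```yml
version: '3'

services:
  app:
    image: offen/offen:latest
    labels:
      # container label the backup script filters on via `docker ps`
      - docker-volume-backup.stop-during-backup=true
    deploy:
      restart_policy:
        # `on-failure` keeps Swarm from immediately restarting containers
        # that the backup script stopped on purpose; the default condition
        # (`any`, i.e. `restart: always`) would fight the backup run.
        condition: on-failure
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
```

Deployed with `docker stack deploy`, containers of such a service are stopped for the backup and later brought back via `docker service update --force`, as implemented in the `backup.sh` changes below.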

src/backup.sh

@@ -17,13 +17,12 @@ DOCKER_SOCK="/var/run/docker.sock"
 if [ -S "$DOCKER_SOCK" ]; then
   TEMPFILE="$(mktemp)"
-  docker ps \
-    --format "{{.ID}}" \
+  docker ps -q \
     --filter "label=docker-volume-backup.stop-during-backup=$BACKUP_STOP_CONTAINER_LABEL" \
     > "$TEMPFILE"
   CONTAINERS_TO_STOP="$(cat $TEMPFILE | tr '\n' ' ')"
   CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)"
-  CONTAINERS_TOTAL="$(docker ps --format "{{.ID}}" | wc -l)"
+  CONTAINERS_TOTAL="$(docker ps -q | wc -l)"
   rm "$TEMPFILE"
   echo "$CONTAINERS_TOTAL containers running on host in total."
   echo "$CONTAINERS_TO_STOP_TOTAL containers marked to be stopped during backup."
@@ -39,7 +38,7 @@ if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
 fi
 info "Creating backup"
-BACKUP_FILENAME="$(date +"${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}")"
+BACKUP_FILENAME="$(date +"$BACKUP_FILENAME")"
 tar -czvf "$BACKUP_FILENAME" $BACKUP_SOURCES # allow the var to expand, in case we have multiple sources
 if [ ! -z "$GPG_PASSPHRASE" ]; then
@@ -51,8 +50,31 @@ if [ ! -z "$GPG_PASSPHRASE" ]; then
 fi
 if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
-  info "Starting containers back up"
-  docker start $CONTAINERS_TO_STOP
+  info "Starting containers/services back up"
+  # The container might be part of a stack when running in swarm mode, so
+  # its parent service needs to be restarted instead once backup is finished.
+  SERVICES_REQUIRING_UPDATE=""
+  for CONTAINER_ID in $CONTAINERS_TO_STOP; do
+    SWARM_SERVICE_NAME=$(
+      docker inspect \
+        --format "{{ index .Config.Labels \"com.docker.swarm.service.name\" }}" \
+        $CONTAINER_ID
+    )
+    if [ -z "$SWARM_SERVICE_NAME" ]; then
+      echo "Restarting $(docker start $CONTAINER_ID)"
+    else
+      echo "Removing $(docker rm $CONTAINER_ID)"
+      # Multiple containers might belong to the same service, so they will
+      # be restarted only after all names are known.
+      SERVICES_REQUIRING_UPDATE="${SERVICES_REQUIRING_UPDATE} ${SWARM_SERVICE_NAME}"
+    fi
+  done
+  if [ -n "$SERVICES_REQUIRING_UPDATE" ]; then
+    for SERVICE_NAME in $(echo -n "$SERVICES_REQUIRING_UPDATE" | tr ' ' '\n' | sort -u); do
+      docker service update --force $SERVICE_NAME
+    done
+  fi
 fi
 if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
@@ -76,7 +98,12 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
   sleep "$BACKUP_PRUNING_LEEWAY"
   bucket=$AWS_S3_BUCKET_NAME
-  rule_applies_to=$(mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l)
+  rule_applies_to=$(
+    mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force \
+      --older-than "${BACKUP_RETENTION_DAYS}d" \
+      "backup-target/$bucket" \
+      | wc -l
+  )
   if [ "$rule_applies_to" == "0" ]; then
     echo "No backups found older than the configured retention period of $BACKUP_RETENTION_DAYS days."
     echo "Doing nothing."
@@ -91,6 +118,8 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
     exit 1
   fi
-  mc rm $MC_GLOBAL_OPTIONS --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
+  mc rm $MC_GLOBAL_OPTIONS \
+    --recursive -force \
+    --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
   echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
 fi

src/entrypoint.sh

@@ -19,6 +19,7 @@ BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
 AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
 AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
+AWS_ENDPOINT_PROTO="${AWS_ENDPOINT_PROTO:-https}"
 GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
@@ -29,7 +30,7 @@ EOF
 chmod a+x env.sh
 source env.sh
-mc $MC_GLOBAL_OPTIONS alias set backup-target "https://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+mc $MC_GLOBAL_OPTIONS alias set backup-target "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
 # Add our cron entry, and direct stdout & stderr to Docker commands stdout
 echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."

test/default/docker-compose.yml (new file)

@@ -0,0 +1,42 @@
+version: '3'
+
+services:
+  minio:
+    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
+    environment:
+      MINIO_ROOT_USER: test
+      MINIO_ROOT_PASSWORD: test
+      MINIO_ACCESS_KEY: test
+      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
+    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
+    volumes:
+      - backup_data:/data
+
+  backup: &default_backup_service
+    image: offen/docker-volume-backup:canary
+    depends_on:
+      - minio
+    restart: always
+    environment:
+      AWS_ACCESS_KEY_ID: test
+      AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
+      AWS_ENDPOINT: minio:9000
+      AWS_ENDPOINT_PROTO: http
+      AWS_S3_BUCKET_NAME: backup
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+    volumes:
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  backup_data:
+  app_data:

test/default/test.sh (new file, executable)

@@ -0,0 +1,24 @@
+#!/bin/sh
+
+set -e
+
+cd $(dirname $0)
+
+docker-compose up -d
+sleep 5
+
+docker-compose exec backup backup
+
+docker run --rm -it \
+  -v default_backup_data:/data alpine \
+  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
+
+if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
+  echo "Expected all containers to be running post backup, instead seen:"
+  docker-compose ps
+  exit 1
+fi
+
+docker-compose down --volumes
+
+echo "Test passed"