Mirror of https://github.com/offen/docker-volume-backup.git (synced 2025-12-05 09:08:02 +01:00)
Compare commits
20 Commits
| SHA1 |
|---|
| dda71c3a5d |
| a8f013e16a |
| 44d65c1a67 |
| 88d4326e61 |
| 6d3e43680c |
| 6ce197696a |
| ac3a231d2b |
| 054ab8fbe6 |
| fa356137e8 |
| 07befda44d |
| c33ebc0c70 |
| 23c287bfc7 |
| 5be3c36040 |
| 57afad5727 |
| bafca7bb85 |
| 84afc43fd8 |
| 1af345061c |
| 5368eb8c5e |
| 5978a897ad |
| 203bad3427 |
.circleci/config.yml
@@ -1,40 +1,62 @@
 version: 2.1
 
 jobs:
-  build:
-    docker:
-      - image: cimg/base:2020.06
+  canary:
+    machine:
+      image: ubuntu-1604:202007-01
     working_directory: ~/docker-volume-backup
     steps:
       - checkout
-      - setup_remote_docker
       - run:
           name: Build
-          command: make build
-      - run:
-          name: Check if image needs to be pushed
           command: |
-            if [[ -z "$CIRCLE_TAG" ]]; then
-              echo "Not a git tag, nothing to do ..."
-              circleci-agent step halt
-            fi
+            docker build . -t offen/docker-volume-backup:canary
+      - run:
+          name: Run tests
+          command: |
+            for test in test/**/test.sh; do
+              /bin/sh $test
+            done
+
+  build:
+    docker:
+      - image: cimg/base:2020.06
+    environment:
+      DOCKER_BUILDKIT: '1'
+      DOCKER_CLI_EXPERIMENTAL: enabled
+    working_directory: ~/docker-volume-backup
+    steps:
+      - checkout
+      - setup_remote_docker:
+          version: 20.10.6
       - docker/install-docker-credential-helper
       - docker/configure-docker-credentials-store
       - run:
           name: Push to Docker Hub
           command: |
             echo "$DOCKER_ACCESSTOKEN" | docker login --username offen --password-stdin
-            docker tag offen/docker-volume-backup:local offen/docker-volume-backup:$CIRCLE_TAG
-            docker tag offen/docker-volume-backup:local offen/docker-volume-backup:latest
-            docker push offen/docker-volume-backup:$CIRCLE_TAG
-            docker push offen/docker-volume-backup:latest
+            # This is required for building ARM: https://gitlab.alpinelinux.org/alpine/aports/-/issues/12406
+            docker run --rm --privileged linuxkit/binfmt:v0.8
+            docker context create docker-volume-backup
+            docker buildx create docker-volume-backup --name docker-volume-backup --use
+            docker buildx inspect --bootstrap
+            docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
+              -t offen/docker-volume-backup:$CIRCLE_TAG \
+              -t offen/docker-volume-backup:latest \
+              . --push
 
 workflows:
   version: 2
-  deploy:
+  docker_image:
     jobs:
+      - canary:
+          filters:
+            tags:
+              ignore: /^v.*/
       - build:
           filters:
             branches:
               ignore: /.*/
             tags:
               only: /^v.*/
.dockerignore (new file, 1 line)
@@ -0,0 +1 @@
+test
Dockerfile (14 changed lines)
@@ -1,16 +1,20 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
-# SPDX-License-Identifier: MIT
+# SPDX-License-Identifier: MPL-2.0
 
-FROM alpine:3.13
+FROM golang:1.16-alpine as builder
+ARG MC_VERSION=RELEASE.2021-06-13T17-48-22Z
+RUN go install -ldflags "-X github.com/minio/mc/cmd.ReleaseTag=$MC_VERSION" github.com/minio/mc@$MC_VERSION
+
+FROM alpine:3.14
 
 WORKDIR /root
 
 RUN apk add --update ca-certificates docker openrc gnupg
 RUN update-ca-certificates
 RUN rc-update add docker boot
 
-RUN wget https://dl.min.io/client/mc/release/linux-amd64/mc && \
-  chmod +x mc && \
-  mv mc /usr/bin/mc
+COPY --from=builder /go/bin/mc /usr/bin/mc
+RUN mc --version
 
 COPY src/backup.sh src/entrypoint.sh /root/
 RUN chmod +x backup.sh && mv backup.sh /usr/bin/backup \
Makefile (5 lines deleted)
@@ -1,5 +0,0 @@
-DOCKER_TAG ?= local
-
-.PHONY: build
-build:
-	@docker build -t offen/docker-volume-backup:$(DOCKER_TAG) .
README.md (36 changed lines)
@@ -38,11 +38,39 @@ AWS_S3_BUCKET_NAME="<xxx>"
 
 # BACKUP_RETENTION_DAYS="7"
 
+# In case the duration a backup takes fluctuates noticeably in your setup
+# you can adjust this setting to make sure there are no race conditions
+# between the backup finishing and the pruning not deleting backups that
+# sit on the very edge of the time window. Set this value to a duration
+# that is expected to be bigger than the maximum difference of backups.
+# Valid values have a suffix of (s)econds, (m)inutes, (h)ours, or (d)ays.
+
 # BACKUP_PRUNING_LEEWAY="10m"
 
+########### BACKUP ENCRYPTION
+
+# Backups can be encrypted using gpg in case a passphrase is given
+
+# GPG_PASSPHRASE="<xxx>"
+
+########### STOPPING CONTAINERS DURING BACKUP
+
+# Containers can be stopped by applying a
+# `docker-volume-backup.stop-during-backup` label. By default, all containers
+# that are labeled with `true` will be stopped. If you need more fine grained
+# control (e.g. when running multiple containers based on this image), you can
+# override this default by specifying a different value here.
+
+# BACKUP_STOP_CONTAINER_LABEL="service1"
+
+########### MINIO CLIENT CONFIGURATION
+
+# Pass these additional flags to all MinIO client `mc` invocations.
+# This can be used for example to pass `--insecure` when using self
+# signed certificates, or passing `--debug` to gain insights on
+# unexpected behavior.
+
+# MC_GLOBAL_OPTIONS="<xxx>"
 ```
 
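For orientation, here is a hedged sketch of how the options documented above could be wired into a compose service; the image tag and all values are illustrative placeholders and are not part of this diff.

```
# Illustrative only: a backup service using the options documented above.
# All values are placeholders, not taken from the repository.
services:
  backup:
    image: offen/docker-volume-backup:latest
    environment:
      BACKUP_RETENTION_DAYS: "7"              # prune backups older than 7 days
      BACKUP_PRUNING_LEEWAY: "10m"            # wait before checking prune eligibility
      GPG_PASSPHRASE: "example-passphrase"    # encrypt the archive with gpg
      BACKUP_STOP_CONTAINER_LABEL: "service1" # only stop containers labeled with this value
      MC_GLOBAL_OPTIONS: "--insecure"         # e.g. when MinIO uses self signed certificates
    volumes:
      - data:/backup/data:ro
      - /var/run/docker.sock:/var/run/docker.sock

volumes:
  data:
```
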
 ## Example in a docker-compose setup
 
@@ -78,6 +106,12 @@ volumes:
   data:
 ```
 
+## Using with Docker Swarm
+
+By default, Docker Swarm will restart stopped containers automatically, even when manually stopped. If you plan to have your containers / services stopped during backup, this means you need to apply the `on-failure` restart policy to your service's definitions. A restart policy of `always` is not compatible with this tool.
+
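As a sketch of the Swarm caveat above (assumed syntax, not taken from the README), a service would keep the `docker-volume-backup.stop-during-backup` label as a container label and use an `on-failure` restart policy so Swarm does not immediately restart it while the backup runs.

```
# Illustrative Swarm service definition; service name and image are placeholders.
services:
  app:
    image: offen/offen:latest
    labels:
      # container label the backup script filters on
      - docker-volume-backup.stop-during-backup=true
    deploy:
      restart_policy:
        # an always-restart policy would conflict with this tool stopping the container
        condition: on-failure
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
```
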
 ---
 
 ## Differences to `futurice/docker-volume-backup`
 
 This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
@@ -86,3 +120,5 @@ This image is heavily inspired by the `futurice/docker-volume-backup`. We decide
 - This image makes use of the MinIO client `mc` instead of the full blown AWS CLI for uploading backups.
 - The original image proposed to handle backup rotation through AWS S3 lifecycle policies. This image adds the option to rotate old backups through the same script so this functionality can also be offered for non-AWS storage backends like MinIO.
 - InfluxDB specific functionality was removed.
+- `arm64` and `arm/v7` architectures are supported.
+- Docker in Swarm mode is supported.
src/backup.sh
@@ -17,13 +17,12 @@ DOCKER_SOCK="/var/run/docker.sock"
 
 if [ -S "$DOCKER_SOCK" ]; then
   TEMPFILE="$(mktemp)"
-  docker ps \
-    --format "{{.ID}}" \
-    --filter "label=docker-volume-backup.stop-during-backup=true" \
+  docker ps -q \
+    --filter "label=docker-volume-backup.stop-during-backup=$BACKUP_STOP_CONTAINER_LABEL" \
     > "$TEMPFILE"
   CONTAINERS_TO_STOP="$(cat $TEMPFILE | tr '\n' ' ')"
   CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)"
-  CONTAINERS_TOTAL="$(docker ps --format "{{.ID}}" | wc -l)"
+  CONTAINERS_TOTAL="$(docker ps -q | wc -l)"
   rm "$TEMPFILE"
   echo "$CONTAINERS_TOTAL containers running on host in total."
   echo "$CONTAINERS_TO_STOP_TOTAL containers marked to be stopped during backup."
@@ -39,7 +38,7 @@ if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
 fi
 
 info "Creating backup"
-BACKUP_FILENAME="$(date +"${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}")"
+BACKUP_FILENAME="$(date +"$BACKUP_FILENAME")"
 tar -czvf "$BACKUP_FILENAME" $BACKUP_SOURCES # allow the var to expand, in case we have multiple sources
 
 if [ ! -z "$GPG_PASSPHRASE" ]; then
@@ -51,14 +50,37 @@ if [ ! -z "$GPG_PASSPHRASE" ]; then
 fi
 
 if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
-  info "Starting containers back up"
-  docker start $CONTAINERS_TO_STOP
+  info "Starting containers/services back up"
+  # The container might be part of a stack when running in swarm mode, so
+  # its parent service needs to be restarted instead once backup is finished.
+  SERVICES_REQUIRING_UPDATE=""
+  for CONTAINER_ID in $CONTAINERS_TO_STOP; do
+    SWARM_SERVICE_NAME=$(
+      docker inspect \
+        --format "{{ index .Config.Labels \"com.docker.swarm.service.name\" }}" \
+        $CONTAINER_ID
+    )
+    if [ -z "$SWARM_SERVICE_NAME" ]; then
+      echo "Restarting $(docker start $CONTAINER_ID)"
+    else
+      echo "Removing $(docker rm $CONTAINER_ID)"
+      # Multiple containers might belong to the same service, so they will
+      # be restarted only after all names are known.
+      SERVICES_REQUIRING_UPDATE="${SERVICES_REQUIRING_UPDATE} ${SWARM_SERVICE_NAME}"
+    fi
+  done
+
+  if [ -n "$SERVICES_REQUIRING_UPDATE" ]; then
+    for SERVICE_NAME in $(echo -n "$SERVICES_REQUIRING_UPDATE" | tr ' ' '\n' | sort -u); do
+      docker service update --force $SERVICE_NAME
+    done
+  fi
 fi
 
 if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
   info "Uploading backup to remote storage"
   echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\"."
-  mc cp "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
+  mc cp $MC_GLOBAL_OPTIONS "$BACKUP_FILENAME" "backup-target/$AWS_S3_BUCKET_NAME"
   echo "Upload finished."
 fi
 
@@ -73,16 +95,22 @@ echo "Will wait for next scheduled backup."
 if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
   info "Pruning old backups"
   echo "Sleeping ${BACKUP_PRUNING_LEEWAY} before checking eligibility."
   sleep "$BACKUP_PRUNING_LEEWAY"
   bucket=$AWS_S3_BUCKET_NAME
 
-  rule_applies_to=$(mc rm --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l)
+  rule_applies_to=$(
+    mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force \
+      --older-than "${BACKUP_RETENTION_DAYS}d" \
+      "backup-target/$bucket" \
+      | wc -l
+  )
   if [ "$rule_applies_to" == "0" ]; then
     echo "No backups found older than the configured retention period of $BACKUP_RETENTION_DAYS days."
     echo "Doing nothing."
     exit 0
   fi
 
-  total=$(mc ls "backup-target/$bucket" | wc -l)
+  total=$(mc ls $MC_GLOBAL_OPTIONS "backup-target/$bucket" | wc -l)
 
   if [ "$rule_applies_to" == "$total" ]; then
     echo "Using a retention of ${BACKUP_RETENTION_DAYS} days would prune all currently existing backups, will not continue."
@@ -90,6 +118,8 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
     exit 1
   fi
 
-  mc rm --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
+  mc rm $MC_GLOBAL_OPTIONS \
+    --recursive -force \
+    --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
   echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
 fi
src/entrypoint.sh
@@ -19,13 +19,18 @@ BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
 
 AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
 AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
+AWS_ENDPOINT_PROTO="${AWS_ENDPOINT_PROTO:-https}"
 
 GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
 
+BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
+
+MC_GLOBAL_OPTIONS="${MC_GLOBAL_OPTIONS:-}"
 EOF
 chmod a+x env.sh
 source env.sh
 
-mc alias set backup-target "https://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+mc $MC_GLOBAL_OPTIONS alias set backup-target "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
 
 # Add our cron entry, and direct stdout & stderr to Docker commands stdout
 echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
test/default/docker-compose.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
+version: '3'
+
+services:
+  minio:
+    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
+    environment:
+      MINIO_ROOT_USER: test
+      MINIO_ROOT_PASSWORD: test
+      MINIO_ACCESS_KEY: test
+      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
+    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
+    volumes:
+      - backup_data:/data
+
+  backup: &default_backup_service
+    image: offen/docker-volume-backup:canary
+    depends_on:
+      - minio
+    restart: always
+    environment:
+      AWS_ACCESS_KEY_ID: test
+      AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
+      AWS_ENDPOINT: minio:9000
+      AWS_ENDPOINT_PROTO: http
+      AWS_S3_BUCKET_NAME: backup
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+    volumes:
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+
+volumes:
+  backup_data:
+  app_data:
test/default/test.sh (new executable file, 24 lines)
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+set -e
+
+cd $(dirname $0)
+
+docker-compose up -d
+sleep 5
+
+docker-compose exec backup backup
+
+docker run --rm -it \
+  -v default_backup_data:/data alpine \
+  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
+
+if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
+  echo "Expected all containers to be running post backup, instead seen:"
+  docker-compose ps
+  exit 1
+fi
+
+docker-compose down --volumes
+
+echo "Test passed"