Mirror of https://github.com/offen/docker-volume-backup.git, synced 2025-12-05 17:18:02 +01:00

Compare commits

10 Commits

| SHA1 |
|---|
| dda71c3a5d |
| a8f013e16a |
| 44d65c1a67 |
| 88d4326e61 |
| 6d3e43680c |
| 6ce197696a |
| ac3a231d2b |
| 054ab8fbe6 |
| fa356137e8 |
| 07befda44d |
.circleci/config.yml (changed)

@@ -2,29 +2,21 @@ version: 2.1
 jobs:
   canary:
-    docker:
-      - image: cimg/base:2020.06
+    machine:
+      image: ubuntu-1604:202007-01
     working_directory: ~/docker-volume-backup
     steps:
       - checkout
-      - setup_remote_docker:
-          version: 20.10.6
       - run:
           name: Build
           command: |
             docker build . -t offen/docker-volume-backup:canary
       - run:
-          name: Create container from image
+          name: Run tests
           command: |
-            docker run -d offen/docker-volume-backup:canary
-            echo "Sleeping for 30s before checking if container is still running."
-            sleep 30
-            count=$(docker ps -q | wc -l)
-            if [[ $count != "1" ]]; then
-              echo "Expected one container to be running, found $count."
-              exit 1
-            fi
-            docker stop $(docker ps -q)
+            for test in test/**/test.sh; do
+              /bin/sh $test
+            done
 
   build:
     docker:
@@ -48,7 +40,7 @@ jobs:
           docker context create docker-volume-backup
           docker buildx create docker-volume-backup --name docker-volume-backup --use
           docker buildx inspect --bootstrap
-          docker buildx build --platform linux/arm64,linux/amd64 \
+          docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
            -t offen/docker-volume-backup:$CIRCLE_TAG \
            -t offen/docker-volume-backup:latest \
            . --push
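The reworked canary job builds the image and then executes every `test/**/test.sh` script it finds, instead of only checking that a started container stays alive. A minimal sketch of running the same checks on a local machine, assuming Docker and docker-compose are installed and the canary tag mirrors the one used in CI:

```sh
#!/bin/sh
# Build the canary image and run every test suite, mirroring the CI job.
set -e

docker build . -t offen/docker-volume-backup:canary

for test in test/**/test.sh; do
  /bin/sh "$test"
done
```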
.dockerignore (new file, 1 line)

@@ -0,0 +1 @@
+test
Dockerfile (12 lines changed)

@@ -1,5 +1,9 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
-# SPDX-License-Identifier: MIT
+# SPDX-License-Identifier: MPL-2.0
 
+FROM golang:1.16-alpine as builder
+ARG MC_VERSION=RELEASE.2021-06-13T17-48-22Z
+RUN go install -ldflags "-X github.com/minio/mc/cmd.ReleaseTag=$MC_VERSION" github.com/minio/mc@$MC_VERSION
+
 FROM alpine:3.14
 
@@ -9,10 +13,8 @@ RUN apk add --update ca-certificates docker openrc gnupg
 RUN update-ca-certificates
 RUN rc-update add docker boot
 
-ARG TARGETARCH=amd64
-RUN wget https://dl.min.io/client/mc/release/linux-$TARGETARCH/mc && \
-  chmod +x mc && \
-  mv mc /usr/bin/mc
+COPY --from=builder /go/bin/mc /usr/bin/mc
+RUN mc --version
 
 COPY src/backup.sh src/entrypoint.sh /root/
 RUN chmod +x backup.sh && mv backup.sh /usr/bin/backup \
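Because `mc` is now compiled in a builder stage from a pinned release tag instead of being downloaded as a prebuilt binary, the image can be built for any platform the Go toolchain supports. A hedged sketch of overriding the pinned version at build time via the new `MC_VERSION` build argument (the release tag shown is the pinned default and the `local` image tag is illustrative):

```sh
# Rebuild the image, optionally passing a different MinIO client release tag.
docker build \
  --build-arg MC_VERSION=RELEASE.2021-06-13T17-48-22Z \
  -t offen/docker-volume-backup:local .
```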
README.md (changed)

@@ -106,6 +106,12 @@ volumes:
   data:
 ```
 
+## Using with Docker Swarm
+
+By default, Docker Swarm will restart stopped containers automatically, even when manually stopped. If you plan to have your containers / services stopped during backup, this means you need to apply the `on-failure` restart policy to your service's definitions. A restart policy of `always` is not compatible with this tool.
+
+---
+
 ## Differences to `futurice/docker-volume-backup`
 
 This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
@@ -114,4 +120,5 @@ This image is heavily inspired by the `futurice/docker-volume-backup`. We decide
 - This image makes use of the MinIO client `mc` instead of the full blown AWS CLI for uploading backups.
 - The original image proposed to handle backup rotation through AWS S3 lifecycle policies. This image adds the option to rotate old backups through the same script so this functionality can also be offered for non-AWS storage backends like MinIO.
 - InfluxDB specific functionality was removed.
-- `arm64` Architecture is supported.
+- `arm64` and `arm/v7` architectures are supported.
+- Docker in Swarm mode is supported.
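The new Swarm section boils down to one requirement: services whose containers get stopped for the backup must restart on failure rather than unconditionally. A minimal sketch of adjusting an existing Swarm service accordingly, using standard `docker service update` flags (the service name `my_app` is illustrative, not taken from the repository):

```sh
# Label the service's containers so they are stopped during backup, and
# switch the restart policy from the default "any" to "on-failure" so the
# backup container can stop them without them being restarted immediately.
docker service update \
  --container-label-add docker-volume-backup.stop-during-backup=true \
  --restart-condition on-failure \
  my_app
```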
src/backup.sh (changed)

@@ -17,13 +17,12 @@ DOCKER_SOCK="/var/run/docker.sock"
 
 if [ -S "$DOCKER_SOCK" ]; then
   TEMPFILE="$(mktemp)"
-  docker ps \
-    --format "{{.ID}}" \
+  docker ps -q \
     --filter "label=docker-volume-backup.stop-during-backup=$BACKUP_STOP_CONTAINER_LABEL" \
     > "$TEMPFILE"
   CONTAINERS_TO_STOP="$(cat $TEMPFILE | tr '\n' ' ')"
   CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)"
-  CONTAINERS_TOTAL="$(docker ps --format "{{.ID}}" | wc -l)"
+  CONTAINERS_TOTAL="$(docker ps -q | wc -l)"
   rm "$TEMPFILE"
   echo "$CONTAINERS_TOTAL containers running on host in total."
   echo "$CONTAINERS_TO_STOP_TOTAL containers marked to be stopped during backup."
@@ -39,7 +38,7 @@ if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
 fi
 
 info "Creating backup"
-BACKUP_FILENAME="$(date +"${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}")"
+BACKUP_FILENAME="$(date +"$BACKUP_FILENAME")"
 tar -czvf "$BACKUP_FILENAME" $BACKUP_SOURCES # allow the var to expand, in case we have multiple sources
 
 if [ ! -z "$GPG_PASSPHRASE" ]; then
@@ -51,8 +50,31 @@ if [ ! -z "$GPG_PASSPHRASE" ]; then
 fi
 
 if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
-  info "Starting containers back up"
-  docker start $CONTAINERS_TO_STOP
+  info "Starting containers/services back up"
+  # The container might be part of a stack when running in swarm mode, so
+  # its parent service needs to be restarted instead once backup is finished.
+  SERVICES_REQUIRING_UPDATE=""
+  for CONTAINER_ID in $CONTAINERS_TO_STOP; do
+    SWARM_SERVICE_NAME=$(
+      docker inspect \
+        --format "{{ index .Config.Labels \"com.docker.swarm.service.name\" }}" \
+        $CONTAINER_ID
+    )
+    if [ -z "$SWARM_SERVICE_NAME" ]; then
+      echo "Restarting $(docker start $CONTAINER_ID)"
+    else
+      echo "Removing $(docker rm $CONTAINER_ID)"
+      # Multiple containers might belong to the same service, so they will
+      # be restarted only after all names are known.
+      SERVICES_REQUIRING_UPDATE="${SERVICES_REQUIRING_UPDATE} ${SWARM_SERVICE_NAME}"
+    fi
+  done
+
+  if [ -n "$SERVICES_REQUIRING_UPDATE" ]; then
+    for SERVICE_NAME in $(echo -n "$SERVICES_REQUIRING_UPDATE" | tr ' ' '\n' | sort -u); do
+      docker service update --force $SERVICE_NAME
+    done
+  fi
 fi
 
 if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
@@ -76,7 +98,12 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
   sleep "$BACKUP_PRUNING_LEEWAY"
   bucket=$AWS_S3_BUCKET_NAME
 
-  rule_applies_to=$(mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket" | wc -l)
+  rule_applies_to=$(
+    mc rm $MC_GLOBAL_OPTIONS --fake --recursive -force \
+      --older-than "${BACKUP_RETENTION_DAYS}d" \
+      "backup-target/$bucket" \
+      | wc -l
+  )
   if [ "$rule_applies_to" == "0" ]; then
     echo "No backups found older than the configured retention period of $BACKUP_RETENTION_DAYS days."
     echo "Doing nothing."
@@ -91,6 +118,8 @@ if [ ! -z "$BACKUP_RETENTION_DAYS" ]; then
     exit 1
   fi
 
-  mc rm $MC_GLOBAL_OPTIONS --recursive -force --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
+  mc rm $MC_GLOBAL_OPTIONS \
+    --recursive -force \
+    --older-than "${BACKUP_RETENTION_DAYS}d" "backup-target/$bucket"
   echo "Successfully pruned ${rule_applies_to} backups older than ${BACKUP_RETENTION_DAYS} days."
 fi
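Since the script selects containers purely by label, it can be useful to preview which containers a backup run would stop. A small sketch using the same filter the script applies, assuming the label value `true` as used in the test setup below:

```sh
# List the containers that would be stopped during the next backup run.
docker ps -q \
  --filter "label=docker-volume-backup.stop-during-backup=true"
```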
src/entrypoint.sh (changed)

@@ -19,6 +19,7 @@ BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
 
 AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
 AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
+AWS_ENDPOINT_PROTO="${AWS_ENDPOINT_PROTO:-https}"
 
 GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
 
@@ -29,7 +30,7 @@ EOF
 chmod a+x env.sh
 source env.sh
 
-mc $MC_GLOBAL_OPTIONS alias set backup-target "https://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
+mc $MC_GLOBAL_OPTIONS alias set backup-target "$AWS_ENDPOINT_PROTO://$AWS_ENDPOINT" "$AWS_ACCESS_KEY_ID" "$AWS_SECRET_ACCESS_KEY"
 
 # Add our cron entry, and direct stdout & stderr to Docker commands stdout
 echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
test/default/docker-compose.yml (new file, 42 lines)

@@ -0,0 +1,42 @@
+version: '3'
+
+services:
+  minio:
+    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
+    environment:
+      MINIO_ROOT_USER: test
+      MINIO_ROOT_PASSWORD: test
+      MINIO_ACCESS_KEY: test
+      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
+    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
+    volumes:
+      - backup_data:/data
+
+  backup: &default_backup_service
+    image: offen/docker-volume-backup:canary
+    depends_on:
+      - minio
+    restart: always
+    environment:
+      AWS_ACCESS_KEY_ID: test
+      AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
+      AWS_ENDPOINT: minio:9000
+      AWS_ENDPOINT_PROTO: http
+      AWS_S3_BUCKET_NAME: backup
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+    volumes:
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+
+volumes:
+  backup_data:
+  app_data:
test/default/test.sh (new executable file, 24 lines)

@@ -0,0 +1,24 @@
+#!/bin/sh
+
+set -e
+
+cd $(dirname $0)
+
+docker-compose up -d
+sleep 5
+
+docker-compose exec backup backup
+
+docker run --rm -it \
+  -v default_backup_data:/data alpine \
+  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
+
+if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
+  echo "Expected all containers to be running post backup, instead seen:"
+  docker-compose ps
+  exit 1
+fi
+
+docker-compose down --volumes
+
+echo "Test passed"