Mirror of https://github.com/offen/docker-volume-backup.git, synced 2025-12-05 17:18:02 +01:00

Compare commits
30 commits:

1892d56ff6, 0b205fe6dc, 8c8a2fa088, a850bf13fe, b52b271bac, cac5777e79,
94a1edc4ad, a654097e59, 1b1fc4856c, e81c34b8fc, 9c23767fce, 51af8c3c77,
1ea0b51b23, da8c63f755, 9bc8db0f7c, 508bc07b4f, b8f71b04a1, 5f3832d621,
4b1127b8c4, ae50a3ac4f, bad22eee93, c9ebb9e14e, 6e1b8553e6, 5ec2b2c3ff,
3bbeba5b83, 9155b4d130, 2a17e84ab6, 00f2359461, 0504a92a1f, 3ded77448c
@@ -48,6 +48,7 @@ jobs:
           if [[ "$CIRCLE_TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
             # prerelease tags like `v2.0.0-alpha.1` should not be released as `latest`
             tag_args="$tag_args -t offen/docker-volume-backup:latest"
+            tag_args="$tag_args -t offen/docker-volume-backup:$(echo "$CIRCLE_TAG" | cut -d. -f1)"
           fi
           docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
             $tag_args . --push
@@ -1,7 +1,7 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0
 
-FROM golang:1.17-alpine as builder
+FROM golang:1.18-alpine as builder
 
 WORKDIR /app
 COPY go.mod go.sum ./
@@ -14,7 +14,7 @@ FROM alpine:3.15
 
 WORKDIR /root
 
-RUN apk add --update ca-certificates
+RUN apk add --no-cache ca-certificates
 
 COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
README.md (395 changes)
@@ -7,7 +7,7 @@
 Backup Docker volumes locally or to any S3 compatible storage.
 
 The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) sidecar container to an existing Docker setup.
-It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3 or WebDAV compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
+It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV or SSH compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
 
 <!-- MarkdownTOC -->
 
@@ -16,26 +16,33 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
   - [One-off backups using Docker CLI](#one-off-backups-using-docker-cli)
 - [Configuration reference](#configuration-reference)
 - [How to](#how-to)
-  - [Stopping containers during backup](#stopping-containers-during-backup)
+  - [Stop containers during backup](#stop-containers-during-backup)
   - [Automatically pruning old backups](#automatically-pruning-old-backups)
   - [Send email notifications on failed backup runs](#send-email-notifications-on-failed-backup-runs)
   - [Customize notifications](#customize-notifications)
+  - [Run custom commands before / after backup](#run-custom-commands-before--after-backup)
   - [Encrypting your backup using GPG](#encrypting-your-backup-using-gpg)
   - [Restoring a volume from a backup](#restoring-a-volume-from-a-backup)
   - [Set the timezone the container runs in](#set-the-timezone-the-container-runs-in)
   - [Using with Docker Swarm](#using-with-docker-swarm)
   - [Manually triggering a backup](#manually-triggering-a-backup)
   - [Update deprecated email configuration](#update-deprecated-email-configuration)
+  - [Replace deprecated `BACKUP_FROM_SNAPSHOT` usage](#replace-deprecated-backup_from_snapshot-usage)
+  - [Using a custom Docker host](#using-a-custom-docker-host)
+  - [Run multiple backup schedules in the same container](#run-multiple-backup-schedules-in-the-same-container)
+  - [Define different retention schedules](#define-different-retention-schedules)
 - [Recipes](#recipes)
   - [Backing up to AWS S3](#backing-up-to-aws-s3)
   - [Backing up to Filebase](#backing-up-to-filebase)
   - [Backing up to MinIO](#backing-up-to-minio)
   - [Backing up to WebDAV](#backing-up-to-webdav)
+  - [Backing up to SSH](#backing-up-to-ssh)
   - [Backing up locally](#backing-up-locally)
   - [Backing up to AWS S3 as well as locally](#backing-up-to-aws-s3-as-well-as-locally)
   - [Running on a custom cron schedule](#running-on-a-custom-cron-schedule)
   - [Rotating away backups that are older than 7 days](#rotating-away-backups-that-are-older-than-7-days)
   - [Encrypting your backups using GPG](#encrypting-your-backups-using-gpg)
+  - [Using mysqldump to prepare the backup](#using-mysqldump-to-prepare-the-backup)
   - [Running multiple instances in the same setup](#running-multiple-instances-in-the-same-setup)
 - [Differences to `futurice/docker-volume-backup`](#differences-to-futuricedocker-volume-backup)
 
@@ -103,7 +110,7 @@ docker run --rm \
   --env AWS_SECRET_ACCESS_KEY="<xxx>" \
   --env AWS_S3_BUCKET_NAME="<xxx>" \
   --entrypoint backup \
-  offen/docker-volume-backup:latest
+  offen/docker-volume-backup:v2
 ```
 
 Alternatively, pass a `--env-file` in order to use a full config as described below.
@@ -145,6 +152,11 @@ You can populate below template according to your requirements and use it as you
 
 # BACKUP_LATEST_SYMLINK="backup.latest.tar.gz"
 
+# ************************************************************************
+# The BACKUP_FROM_SNAPSHOT option has been deprecated and will be removed
+# in the next major version. Please use exec-pre and exec-post
+# as documented below instead.
+# ************************************************************************
 # Whether to copy the content of backup folder before creating the tar archive.
 # In the rare scenario where the content of the source backup volume is continously
 # updating, but we do not wish to stop the container while performing the backup,
@@ -152,6 +164,17 @@ You can populate below template according to your requirements and use it as you
 
 # BACKUP_FROM_SNAPSHOT="false"
 
+# By default, the `/backup` directory inside the container will be backed up.
+# In case you need to use a custom location, set `BACKUP_SOURCES`.
+
+# BACKUP_SOURCES="/other/location"
+
+# When given, all files in BACKUP_SOURCES whose full path matches the given
+# regular expression will be excluded from the archive. Regular Expressions
+# can be used as from the Go standard library https://pkg.go.dev/regexp
+
+# BACKUP_EXCLUDE_REGEXP="\.log$"
+
 ########### BACKUP STORAGE
 
 # The name of the remote bucket that should be used for storing backups. If
@@ -192,9 +215,9 @@ You can populate below template according to your requirements and use it as you
 # AWS_ENDPOINT_PROTO="https"
 
 # Setting this variable to `true` will disable verification of
-# SSL certificates. You shouldn't use this unless you use self-signed
-# certificates for your remote storage backend. This can only be used
-# when AWS_ENDPOINT_PROTO is set to `https`.
+# SSL certificates for AWS_ENDPOINT. You shouldn't use this unless you use
+# self-signed certificates for your remote storage backend. This can only be
+# used when AWS_ENDPOINT_PROTO is set to `https`.
 
 # AWS_ENDPOINT_INSECURE="true"
 
@@ -217,6 +240,46 @@ You can populate below template according to your requirements and use it as you
 
 # WEBDAV_PASSWORD="password"
 
+# Setting this variable to `true` will disable verification of
+# SSL certificates for WEBDAV_URL. You shouldn't use this unless you use
+# self-signed certificates for your remote storage backend.
+
+# WEBDAV_URL_INSECURE="true"
+
+# You can also backup files to any SSH server:
+
+# The URL of the remote SSH server
+
+# SSH_HOST_NAME="server.local"
+
+# The port of the remote SSH server
+# Optional variable default value is `22`
+
+# SSH_PORT=2222
+
+# The Directory to place the backups to on the SSH server.
+
+# SSH_REMOTE_PATH="/my/directory/"
+
+# The username for the SSH server
+
+# SSH_USER="user"
+
+# The password for the SSH server
+
+# SSH_PASSWORD="password"
+
+# The private key path in container for SSH server
+# Default value: /root/.ssh/id_rsa
+# If file is mounted to /root/.ssh/id_rsa path it will be used. Non-RSA keys will
+# also work.
+
+# SSH_IDENTITY_FILE="/root/.ssh/id_rsa"
+
+# The passphrase for the identity file
+
+# SSH_IDENTITY_PASSPHRASE="pass"
+
 # In addition to storing backups remotely, you can also keep local copies.
 # Pass a container-local path to store your backups if needed. You also need to
 # mount a local folder or Docker volume into that location (`/archive`
@@ -278,6 +341,27 @@ You can populate below template according to your requirements and use it as you
 
 # BACKUP_STOP_CONTAINER_LABEL="service1"
 
+########### EXECUTING COMMANDS IN CONTAINERS PRE/POST BACKUP
+
+# It is possible to define commands to be run in any container before and after
+# a backup is conducted. The commands themselves are defined in labels like
+# `docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump [options] > dump.sql'.
+# Several options exist for controlling this feature:
+
+# By default, any output of such a command is suppressed. If this value
+# is configured to be "true", command execution output will be forwarded to
+# the backup container's stdout and stderr.
+
+# EXEC_FORWARD_OUTPUT="true"
+
+# Without any further configuration, all commands defined in labels will be
+# run before and after a backup. If you need more fine grained control, you
+# can use this option to set a label that will be used for narrowing down
+# the set of eligible containers. When set, an eligible container will also need
+# to be labeled as `docker-volume-backup.exec-label=database`.
+
+# EXEC_LABEL="database"
+
 ########### NOTIFICATIONS
 
 # Notifications (email, Slack, etc.) can be sent out when a backup run finishes.
@@ -298,6 +382,23 @@ You can populate below template according to your requirements and use it as you
 
 # NOTIFICATION_LEVEL="error"
 
+########### DOCKER HOST
+
+# If you are interfacing with Docker via TCP you can set the Docker host here
+# instead of mounting the Docker socket as a volume. This is unset by default.
+
+# DOCKER_HOST="tcp://docker_socket_proxy:2375"
+
+########### LOCK_TIMEOUT
+
+# In the case of overlapping cron schedules run by the same container,
+# subsequent invocations will wait for previous runs to finish before starting.
+# By default, this will time out and fail in case the lock could not be acquired
+# after 60 minutes. In case you need to adjust this timeout, supply a duration
+# value as per https://pkg.go.dev/time#ParseDuration to `LOCK_TIMEOUT`
+
+# LOCK_TIMEOUT="60m"
+
 ########### EMAIL NOTIFICATIONS
 
 # ************************************************************************
@@ -329,14 +430,14 @@ You can populate below template according to your requirements and use it as you
 # EMAIL_SMTP_PORT="<port>"
 ```
 
-In case you encouter double quoted values in your configuration you might be running an [older version of `docker-compose`].
+In case you encouter double quoted values in your configuration you might be running an [older version of `docker-compose`][compose-issue].
 You can work around this by either updating `docker-compose` or unquoting your configuration values.
 
 [compose-issue]: https://github.com/docker/compose/issues/2854
 
 ## How to
 
-### Stopping containers during backup
+### Stop containers during backup
 
 In many cases, it will be desirable to stop the services that are consuming the volume you want to backup in order to ensure data integrity.
 This image can automatically stop and restart containers and services (in case you are running Docker in Swarm mode).
@@ -354,7 +455,7 @@ services:
       - docker-volume-backup.stop-during-backup=service1
 
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       BACKUP_STOP_CONTAINER_LABEL: service1
     volumes:
@@ -377,7 +478,7 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       BACKUP_FILENAME: backup-%Y-%m-%dT%H-%M-%S.tar.gz
       BACKUP_PRUNING_PREFIX: backup-
@@ -400,7 +501,7 @@ version: '3'
 
 services:
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       # ... other configuration values go here
       NOTIFICATION_URLS=smtp://me:secret@smtp.example.com:587/?fromAddress=no-reply@example.com&toAddresses=you@example.com
@@ -436,6 +537,63 @@ Overridable template names are: `title_success`, `body_success`, `title_failure`
 
 For a full list of available variables and functions, see [this page](https://github.com/offen/docker-volume-backup/blob/master/docs/NOTIFICATION-TEMPLATES.md).
 
+### Run custom commands before / after backup
+
+In certain scenarios it can be required to run specific commands before and after a backup is taken (e.g. dumping a database).
+When mounting the Docker socket into the `docker-volume-backup` container, you can define pre- and post-commands that will be run in the context of the target container.
+Such commands are defined by specifying the command in a `docker-volume-backup.exec-[pre|post]` label.
+
+Taking a database dump using `mysqldump` would look like this:
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  database:
+    image: mariadb
+    volumes:
+      - backup_data:/tmp/backups
+    labels:
+      - docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump --all-databases > /backups/dump.sql'
+
+volumes:
+  backup_data:
+```
+
+Due to Docker limitations, you currently cannot use any kind of redirection in these commands unless you pass the command to `/bin/sh -c` or similar.
+I.e. instead of using `echo "ok" > ok.txt` you will need to use `/bin/sh -c 'echo "ok" > ok.txt'`.
+
+If you need fine grained control about which container's commands are run, you can use the `EXEC_LABEL` configuration on your `docker-volume-backup` container:
+
+```yml
+version: '3'
+
+services:
+  database:
+    image: mariadb
+    volumes:
+      - backup_data:/tmp/backups
+    labels:
+      - docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump --all-databases > /tmp/volume/dump.sql'
+      - docker-volume-backup.exec-label=database
+
+  backup:
+    image: offen/docker-volume-backup:v2
+    environment:
+      EXEC_LABEL: database
+    volumes:
+      - data:/backup/dump:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+  backup_data:
+```
+
+The backup procedure is guaranteed to wait for all `pre` commands to finish.
+However there are no guarantees about the order in which they are run, which could also happen concurrently.
+
 ### Encrypting your backup using GPG
 
 The image supports encrypting backups using GPG out of the box.
@@ -467,6 +625,26 @@ In case you need to restore a volume from a backup, the most straight forward pr
 
 Depending on your setup and the application(s) you are running, this might involve other steps to be taken still.
 
+---
+
+If you want to rollback an entire volume to an earlier backup snapshot (recommended for database volumes):
+
+- Trigger a manual backup if necessary (see `Manually triggering a backup`).
+- Stop the container(s) that are using the volume.
+- If volume was initially created using docker-compose, find out exact volume name using:
+  ```console
+  docker volume ls
+  ```
+- Remove existing volume (the example assumes it's named `data`):
+  ```console
+  docker volume rm data
+  ```
+- Create new volume with the same name and restore a snapshot:
+  ```console
+  docker run --rm -it -v data:/backup/my-app-backup -v /path/to/local_backups:/archive:ro alpine tar -xvzf /archive/full_backup_filename.tar.gz
+  ```
+- Restart the container(s) that are using the volume.
+
 ### Set the timezone the container runs in
 
 By default a container based on this image will run in the UTC timezone.
@@ -478,7 +656,7 @@ version: '3'
 
 services:
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     volumes:
       - data:/backup/my-app-backup:ro
       - /etc/timezone:/etc/timezone:ro
@@ -501,7 +679,7 @@ When running in Swarm mode, it's also advised to set a hard memory limit on your
 ```yml
 services:
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     deployment:
       resources:
         limits:
@@ -536,6 +714,106 @@ After:
 NOTIFICATION_URLS=smtp://me:secret@posteo.de:587/?fromAddress=no-reply@example.com&toAddresses=you@example.com
 ```
 
+### Replace deprecated `BACKUP_FROM_SNAPSHOT` usage
+
+Starting with version 2.15.0, the `BACKUP_FROM_SNAPSHOT` feature has been deprecated.
+If you need to prepare your sources before the backup is taken, use `exec-pre`, `exec-post` and an intermediate volume:
+
+```yml
+version: '3'
+
+services:
+  my_app:
+    build: .
+    volumes:
+      - data:/var/my_app
+      - backup:/tmp/backup
+    labels:
+      - docker-volume-backup.exec-pre=cp -r /var/my_app /tmp/backup/my-app
+      - docker-volume-backup.exec-post=rm -rf /tmp/backup/my-app
+
+  backup:
+    image: offen/docker-volume-backup:latest
+    environment:
+      BACKUP_SOURCES: /tmp/backup
+    volumes:
+      - backup:/backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+  data:
+  backup:
+```
+
+### Using a custom Docker host
+
+If you are interfacing with Docker via TCP, set `DOCKER_HOST` to the correct URL.
+```ini
+DOCKER_HOST=tcp://docker_socket_proxy:2375
+```
+
+In case you are using a socket proxy, it must support `GET` and `POST` requests to the `/containers` endpoint. If you are using Docker Swarm, it must also support the `/services` endpoint. If you are using pre/post backup commands, it must also support the `/exec` endpoint.
+
+### Run multiple backup schedules in the same container
+
+Multiple backup schedules with different configuration can be configured by mounting an arbitrary number of configuration files (using the `.env` format) into `/etc/dockervolumebackup/conf.d`:
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:v2
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+      - ./configuration:/etc/dockervolumebackup/conf.d
+
+volumes:
+  data:
+```
+
+A separate cronjob will be created for each config file.
+If a configuration value is set both in the global environment as well as in the config file, the config file will take precedence.
+The `backup` command expects to run on an exclusive lock, so in case you provide the same or overlapping schedules in your cron expressions, the runs will still be executed serially, one after the other.
+The exact order of schedules that use the same cron expression is not specified.
+In case you need your schedules to overlap, you need to create a dedicated container for each schedule instead.
+When changing the configuration, you currently need to manually restart the container for the changes to take effect.
+
+### Define different retention schedules
+
+If you want to manage backup retention on different schedules, the most straight forward approach is to define a dedicated configuration for retention rule using a different prefix in the `BACKUP_FILENAME` parameter and then run them on different cron schedules.
+
+For example, if you wanted to keep daily backups for 7 days, weekly backups for a month, and retain monthly backups forever, you could create three configuration files and mount them into `/etc/dockervolumebackup.d`:
+
+```ini
+# 01daily.conf
+BACKUP_FILENAME="daily-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
+# run every day at 2am
+BACKUP_CRON_EXPRESSION="0 2 * * *"
+BACKUP_PRUNING_PREFIX="daily-backup-"
+BACKUP_RETENTION_DAYS="7"
+```
+
+```ini
+# 02weekly.conf
+BACKUP_FILENAME="weekly-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
+# run every monday at 3am
+BACKUP_CRON_EXPRESSION="0 3 * * 1"
+BACKUP_PRUNING_PREFIX="weekly-backup-"
+BACKUP_RETENTION_DAYS="31"
+```
+
+```ini
+# 03monthly.conf
+BACKUP_FILENAME="monthly-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
+# run every 1st of a month at 4am
+BACKUP_CRON_EXPRESSION="0 4 1 * *"
+```
+
+Note that while it's possible to define colliding cron schedules for each of these configurations, you might need to adjust the value for `LOCK_TIMEOUT` in case your backups are large and might take longer than an hour.
+
 ## Recipes
 
 This section lists configuration for some real-world use cases that you can mix and match according to your needs.
@@ -548,9 +826,9 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
     volumes:
@@ -569,10 +847,10 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       AWS_ENDPOINT: s3.filebase.com
-      AWS_BUCKET_NAME: filebase-bucket
+      AWS_S3_BUCKET_NAME: filebase-bucket
       AWS_ACCESS_KEY_ID: FILEBASE-ACCESS-KEY
       AWS_SECRET_ACCESS_KEY: FILEBASE-SECRET-KEY
     volumes:
@@ -591,10 +869,10 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       AWS_ENDPOINT: minio.example.com
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: MINIOACCESSKEY
       AWS_SECRET_ACCESS_KEY: MINIOSECRETKEY
     volumes:
@@ -613,7 +891,7 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       WEBDAV_URL: https://webdav.mydomain.me
       WEBDAV_PATH: /my/directory/
@@ -627,6 +905,29 @@ volumes:
   data:
 ```
 
+### Backing up to SSH
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:v2
+    environment:
+      SSH_HOST_NAME: server.local
+      SSH_PORT: 2222
+      SSH_USER: user
+      SSH_REMOTE_PATH: /data
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+      - /path/to/private_key:/root/.ssh/id
+
+volumes:
+  data:
+```
+
 ### Backing up locally
 
 ```yml
@@ -635,7 +936,7 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       BACKUP_FILENAME: backup-%Y-%m-%dT%H-%M-%S.tar.gz
       BACKUP_LATEST_SYMLINK: backup-latest.tar.gz
@@ -656,9 +957,9 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
     volumes:
@@ -678,11 +979,11 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       # take a backup on every hour
       BACKUP_CRON_EXPRESSION: "0 * * * *"
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
     volumes:
@@ -701,9 +1002,9 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
       BACKUP_FILENAME: backup-%Y-%m-%dT%H-%M-%S.tar.gz
@@ -725,9 +1026,9 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
       GPG_PASSPHRASE: somesecretstring
@@ -739,6 +1040,32 @@ volumes:
   data:
 ```
 
+### Using mysqldump to prepare the backup
+
+```yml
+version: '3'
+
+services:
+  database:
+    image: mariadb:latest
+    labels:
+      - docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump -psecret --all-databases > /tmp/dumps/dump.sql'
+    volumes:
+      - app_data:/tmp/dumps
+  backup:
+    image: offen/docker-volume-backup:v2
+    environment:
+      BACKUP_FILENAME: db.tar.gz
+      BACKUP_CRON_EXPRESSION: "0 2 * * *"
+    volumes:
+      - ./local:/archive
+      - data:/backup/data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+volumes:
+  data:
+```
+
 ### Running multiple instances in the same setup
 
 ```yml
@@ -747,10 +1074,10 @@ version: '3'
 services:
   # ... define other services using the `data_1` and `data_2` volumes here
   backup_1: &backup_service
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment: &backup_environment
       BACKUP_CRON_EXPRESSION: "0 2 * * *"
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
       # Label the container using the `data_1` volume as `docker-volume-backup.stop-during-backup=service1`
@@ -780,12 +1107,12 @@ This image is heavily inspired by `futurice/docker-volume-backup`. We decided to
 
 - The original image is based on `ubuntu` and requires additional tools, making it heavy.
   This version is roughly 1/25 in compressed size (it's ~12MB).
-- The original image uses a shell script, when this version is written in Go, which makes it easier to extend and maintain (more verbose too).
+- The original image uses a shell script, when this version is written in Go.
 - The original image proposed to handle backup rotation through AWS S3 lifecycle policies.
   This image adds the option to rotate away old backups through the same command so this functionality can also be offered for non-AWS storage backends like MinIO.
   Local copies of backups can also be pruned once they reach a certain age.
 - InfluxDB specific functionality from the original image was removed.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.
-- Notifications on failed backups are supported
-- IAM authentication through instance profiles is supported
+- Notifications on finished backups are supported.
+- IAM authentication through instance profiles is supported.
cmd/backup/archive.go (new file, 133 lines)
@@ -0,0 +1,133 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

// Portions of this file are taken from package `targz`, Copyright (c) 2014 Fredrik Wallgren
// Licensed under the MIT License: https://github.com/walle/targz/blob/57fe4206da5abf7dd3901b4af3891ec2f08c7b08/LICENSE

package main

import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strings"
)

func createArchive(files []string, inputFilePath, outputFilePath string) error {
	inputFilePath = stripTrailingSlashes(inputFilePath)
	inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
	if err != nil {
		return fmt.Errorf("createArchive: error transposing given file paths: %w", err)
	}
	if err := os.MkdirAll(filepath.Dir(outputFilePath), 0755); err != nil {
		return fmt.Errorf("createArchive: error creating output file path: %w", err)
	}

	if err := compress(files, outputFilePath, filepath.Dir(inputFilePath)); err != nil {
		return fmt.Errorf("createArchive: error creating archive: %w", err)
	}

	return nil
}

func stripTrailingSlashes(path string) string {
	if len(path) > 0 && path[len(path)-1] == '/' {
		path = path[0 : len(path)-1]
	}

	return path
}

func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error) {
	inputFilePath, err := filepath.Abs(inputFilePath)
	if err == nil {
		outputFilePath, err = filepath.Abs(outputFilePath)
	}

	return inputFilePath, outputFilePath, err
}

func compress(paths []string, outFilePath, subPath string) error {
	file, err := os.Create(outFilePath)
	if err != nil {
		return fmt.Errorf("compress: error creating out file: %w", err)
	}

	prefix := path.Dir(outFilePath)
	gzipWriter := gzip.NewWriter(file)
	tarWriter := tar.NewWriter(gzipWriter)

	for _, p := range paths {
		if err := writeTarGz(p, tarWriter, prefix); err != nil {
			return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
		}
	}

	err = tarWriter.Close()
	if err != nil {
		return fmt.Errorf("compress: error closing tar writer: %w", err)
	}

	err = gzipWriter.Close()
	if err != nil {
		return fmt.Errorf("compress: error closing gzip writer: %w", err)
	}

	err = file.Close()
	if err != nil {
		return fmt.Errorf("compress: error closing file: %w", err)
	}

	return nil
}

func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
	fileInfo, err := os.Lstat(path)
	if err != nil {
		return fmt.Errorf("writeTarGz: error getting file info for %s: %w", path, err)
	}

	if fileInfo.Mode()&os.ModeSocket == os.ModeSocket {
		return nil
	}

	var link string
	if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
		var err error
		if link, err = os.Readlink(path); err != nil {
			return fmt.Errorf("writeTarGz: error resolving symlink %s: %w", path, err)
		}
	}

	header, err := tar.FileInfoHeader(fileInfo, link)
	if err != nil {
		return fmt.Errorf("writeTarGz: error getting file info header: %w", err)
	}
	header.Name = strings.TrimPrefix(path, prefix)

	err = tarWriter.WriteHeader(header)
	if err != nil {
		return fmt.Errorf("writeTarGz: error writing file info header: %w", err)
	}

	if !fileInfo.Mode().IsRegular() {
		return nil
	}

	file, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("writeTarGz: error opening %s: %w", path, err)
	}
	defer file.Close()

	_, err = io.Copy(tarWriter, file)
	if err != nil {
		return fmt.Errorf("writeTarGz: error copying %s to tar writer: %w", path, err)
	}

	return nil
}
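For orientation, here is a minimal usage sketch of the new `createArchive` helper (not part of the changeset): it walks a source directory the way the backup script might, applies a `BACKUP_EXCLUDE_REGEXP`-style filter, and hands the surviving paths to `createArchive`. The paths and the regexp are illustrative only, and the sketch assumes it sits in the same package as `archive.go`.

```go
package main

import (
	"log"
	"os"
	"path/filepath"
	"regexp"
)

func main() {
	// Illustrative exclusion pattern, mirroring BACKUP_EXCLUDE_REGEXP.
	exclude := regexp.MustCompile(`\.log$`)

	// Collect every path under the (hypothetical) source directory,
	// skipping paths matched by the exclusion regexp.
	var files []string
	err := filepath.Walk("/backup", func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if exclude.MatchString(p) {
			return nil
		}
		files = append(files, p)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Hand the filtered list to the helper defined in archive.go.
	if err := createArchive(files, "/backup", "/tmp/backup.tar.gz"); err != nil {
		log.Fatal(err)
	}
}
```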
@@ -3,7 +3,11 @@
 
 package main
 
-import "time"
+import (
+	"fmt"
+	"regexp"
+	"time"
+)
 
 // Config holds all configuration values that are expected to be set
 // by users.
@@ -18,6 +22,7 @@ type Config struct {
 	BackupPruningPrefix      string        `split_words:"true"`
 	BackupStopContainerLabel string        `split_words:"true" default:"true"`
 	BackupFromSnapshot       bool          `split_words:"true"`
+	BackupExcludeRegexp      RegexpDecoder `split_words:"true"`
 	AwsS3BucketName          string        `split_words:"true"`
 	AwsS3Path                string        `split_words:"true"`
 	AwsEndpoint              string        `split_words:"true" default:"s3.amazonaws.com"`
@@ -36,7 +41,34 @@ type Config struct {
 	EmailSMTPUsername        string        `envconfig:"EMAIL_SMTP_USERNAME"`
 	EmailSMTPPassword        string        `envconfig:"EMAIL_SMTP_PASSWORD"`
 	WebdavUrl                string        `split_words:"true"`
+	WebdavUrlInsecure        bool          `split_words:"true"`
 	WebdavPath               string        `split_words:"true" default:"/"`
 	WebdavUsername           string        `split_words:"true"`
 	WebdavPassword           string        `split_words:"true"`
+	SSHHostName              string        `split_words:"true"`
+	SSHPort                  string        `split_words:"true" default:"22"`
+	SSHUser                  string        `split_words:"true"`
+	SSHPassword              string        `split_words:"true"`
+	SSHIdentityFile          string        `split_words:"true" default:"/root/.ssh/id_rsa"`
+	SSHIdentityPassphrase    string        `split_words:"true"`
+	SSHRemotePath            string        `split_words:"true"`
+	ExecLabel                string        `split_words:"true"`
+	ExecForwardOutput        bool          `split_words:"true"`
+	LockTimeout              time.Duration `split_words:"true" default:"60m"`
+}
+
+type RegexpDecoder struct {
+	Re *regexp.Regexp
+}
+
+func (r *RegexpDecoder) Decode(v string) error {
+	if v == "" {
+		return nil
+	}
+	re, err := regexp.Compile(v)
+	if err != nil {
+		return fmt.Errorf("config: error compiling given regexp `%s`: %w", v, err)
+	}
+	*r = RegexpDecoder{Re: re}
+	return nil
 }
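The new `RegexpDecoder` works because `envconfig` calls a custom `Decode(string) error` method on any field type implementing its `Decoder` interface. A self-contained sketch (not part of the changeset) showing how a `BACKUP_EXCLUDE_REGEXP` value ends up as a compiled `*regexp.Regexp`:

```go
package main

import (
	"fmt"
	"os"
	"regexp"

	"github.com/kelseyhightower/envconfig"
)

// RegexpDecoder mirrors the type added in the diff; envconfig detects
// the Decode method and uses it instead of its built-in parsers.
type RegexpDecoder struct {
	Re *regexp.Regexp
}

func (r *RegexpDecoder) Decode(v string) error {
	if v == "" {
		return nil
	}
	re, err := regexp.Compile(v)
	if err != nil {
		return fmt.Errorf("error compiling given regexp `%s`: %w", v, err)
	}
	*r = RegexpDecoder{Re: re}
	return nil
}

type config struct {
	BackupExcludeRegexp RegexpDecoder `split_words:"true"`
}

func main() {
	os.Setenv("BACKUP_EXCLUDE_REGEXP", `\.log$`)
	var c config
	if err := envconfig.Process("", &c); err != nil {
		panic(err)
	}
	// Prints "true": the compiled regexp matches the excluded path.
	fmt.Println(c.BackupExcludeRegexp.Re.MatchString("/backup/app/debug.log"))
}
```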
cmd/backup/exec.go (new file, 123 lines)
@@ -0,0 +1,123 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

// Portions of this file are taken and adapted from `moby`, Copyright 2012-2017 Docker, Inc.
// Licensed under the Apache 2.0 License: https://github.com/moby/moby/blob/8e610b2b55bfd1bfa9436ab110d311f5e8a74dcb/LICENSE

package main

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/cosiner/argv"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/pkg/stdcopy"
	"golang.org/x/sync/errgroup"
)

func (s *script) exec(containerRef string, command string) ([]byte, []byte, error) {
	args, _ := argv.Argv(command, nil, nil)
	execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, types.ExecConfig{
		Cmd:          args[0],
		AttachStdin:  true,
		AttachStderr: true,
	})
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error creating container exec: %w", err)
	}

	resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, types.ExecStartCheck{})
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error attaching container exec: %w", err)
	}
	defer resp.Close()

	var outBuf, errBuf bytes.Buffer
	outputDone := make(chan error)

	go func() {
		_, err := stdcopy.StdCopy(&outBuf, &errBuf, resp.Reader)
		outputDone <- err
	}()

	select {
	case err := <-outputDone:
		if err != nil {
			return nil, nil, fmt.Errorf("exec: error demultiplexing output: %w", err)
		}
		break
	}

	stdout, err := ioutil.ReadAll(&outBuf)
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error reading stdout: %w", err)
	}
	stderr, err := ioutil.ReadAll(&errBuf)
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error reading stderr: %w", err)
	}

	res, err := s.cli.ContainerExecInspect(context.Background(), execID.ID)
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error inspecting container exec: %w", err)
	}

	if res.ExitCode > 0 {
		return stdout, stderr, fmt.Errorf("exec: running command exited %d", res.ExitCode)
	}

	return stdout, stderr, nil
}

func (s *script) runLabeledCommands(label string) error {
	f := []filters.KeyValuePair{
		{Key: "label", Value: label},
	}
	if s.c.ExecLabel != "" {
		f = append(f, filters.KeyValuePair{
			Key:   "label",
			Value: fmt.Sprintf("docker-volume-backup.exec-label=%s", s.c.ExecLabel),
		})
	}
	containersWithCommand, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
		Quiet:   true,
		Filters: filters.NewArgs(f...),
	})
	if err != nil {
		return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
	}

	if len(containersWithCommand) == 0 {
		return nil
	}

	g := new(errgroup.Group)

	for _, container := range containersWithCommand {
		c := container
		g.Go(func() error {
			cmd, _ := c.Labels[label]
			s.logger.Infof("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/"))
			stdout, stderr, err := s.exec(c.ID, cmd)
			if s.c.ExecForwardOutput {
				os.Stderr.Write(stderr)
				os.Stdout.Write(stdout)
			}
			if err != nil {
				return fmt.Errorf("runLabeledCommands: error executing command: %w", err)
			}
			return nil
		})
	}

	if err := g.Wait(); err != nil {
		return fmt.Errorf("runLabeledCommands: error from errgroup: %w", err)
	}
	return nil
}
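The `stdcopy.StdCopy` call above exists because a non-TTY `docker exec` multiplexes stdout and stderr into a single stream that has to be demultiplexed client-side. A standalone sketch (not part of the changeset) that builds such a stream with `stdcopy.NewStdWriter` and splits it back apart, the same way `exec.go` treats `resp.Reader`:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Simulate the multiplexed stream a non-TTY exec produces:
	// NewStdWriter frames each write with the header StdCopy expects.
	var muxed bytes.Buffer
	if _, err := stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("dump written\n")); err != nil {
		panic(err)
	}
	if _, err := stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("warning: slow query\n")); err != nil {
		panic(err)
	}

	// Demultiplex into separate buffers, as exec.go does.
	var outBuf, errBuf bytes.Buffer
	if _, err := stdcopy.StdCopy(&outBuf, &errBuf, &muxed); err != nil {
		panic(err)
	}
	fmt.Printf("stdout: %q\nstderr: %q\n", outBuf.String(), errBuf.String())
}
```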
cmd/backup/lock.go (new file, 58 lines)
@@ -0,0 +1,58 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/gofrs/flock"
)

// lock opens a lockfile at the given location, keeping it locked until the
// caller invokes the returned release func. In case the lock is currently blocked
// by another execution, it will repeatedly retry until the lock is available
// or the given timeout is exceeded.
func (s *script) lock(lockfile string) (func() error, error) {
	start := time.Now()
	defer func() {
		s.stats.LockedTime = time.Now().Sub(start)
	}()

	retry := time.NewTicker(5 * time.Second)
	defer retry.Stop()
	deadline := time.NewTimer(s.c.LockTimeout)
	defer deadline.Stop()

	fileLock := flock.New(lockfile)

	for {
		acquired, err := fileLock.TryLock()
		if err != nil {
			return noop, fmt.Errorf("lock: error trying lock: %w", err)
		}
		if acquired {
			if s.encounteredLock {
				s.logger.Info("Acquired exclusive lock on subsequent attempt, ready to continue.")
			}
			return fileLock.Unlock, nil
		}

		if !s.encounteredLock {
			s.logger.Infof(
				"Exclusive lock was not available on first attempt. Will retry until it becomes available or the timeout of %s is exceeded.",
				s.c.LockTimeout,
			)
			s.encounteredLock = true
		}

		select {
		case <-retry.C:
			continue
		case <-deadline.C:
			return noop, errors.New("lock: timed out waiting for lockfile to become available")
		}
	}
}
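Stripped of the script state and logging, the retry loop in `lock.go` reduces to the following standalone pattern (a sketch, not part of the changeset): try the file lock immediately, then retry on a ticker until a deadline fires.

```go
package main

import (
	"errors"
	"log"
	"time"

	"github.com/gofrs/flock"
)

// acquire retries TryLock every five seconds until it succeeds or the
// timeout elapses, mirroring the approach taken in lock.go.
func acquire(path string, timeout time.Duration) (func() error, error) {
	retry := time.NewTicker(5 * time.Second)
	defer retry.Stop()
	deadline := time.NewTimer(timeout)
	defer deadline.Stop()

	fileLock := flock.New(path)
	for {
		acquired, err := fileLock.TryLock()
		if err != nil {
			return nil, err
		}
		if acquired {
			// The caller releases the lock by invoking the returned func.
			return fileLock.Unlock, nil
		}
		select {
		case <-retry.C:
		case <-deadline.C:
			return nil, errors.New("timed out waiting for lock")
		}
	}
}

func main() {
	unlock, err := acquire("/tmp/example.lock", time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	defer unlock()
	log.Println("holding lock")
}
```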
@@ -8,14 +8,15 @@ import (
 )
 
 func main() {
-	unlock := lock("/var/lock/dockervolumebackup.lock")
-	defer unlock()
-
 	s, err := newScript()
 	if err != nil {
 		panic(err)
 	}
 
+	unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
+	defer unlock()
+	s.must(err)
+
 	defer func() {
 		if pArg := recover(); pArg != nil {
 			if err, ok := pArg.(error); ok {
@@ -38,6 +39,13 @@ func main() {
 	}()
 
 	s.must(func() error {
+		runPostCommands, err := s.runCommands()
+		defer func() {
+			s.must(runPostCommands())
+		}()
+		if err != nil {
+			return err
+		}
 		restartContainers, err := s.stopContainers()
 		// The mechanism for restarting containers is not using hooks as it
 		// should happen as soon as possible (i.e. before uploading backups or
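A note on ordering in the hunk above: `runPostCommands` is deferred before the error from `runCommands` is checked, so the `exec-post` commands still fire when a later step fails; and because `runCommands` returns `noop` when the pre commands fail, the deferred call is harmless in that case too. The pattern in isolation (sketch only, names are illustrative):

// acquire() always returns a callable cleanup, noop on failure,
// so deferring it before the error check is safe.
cleanup, err := acquire()
defer cleanup()
if err != nil {
	return err
}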
@@ -7,11 +7,16 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"github.com/pkg/sftp"
+	"golang.org/x/crypto/ssh"
 	"io"
 	"io/fs"
+	"io/ioutil"
+	"net/http"
 	"os"
 	"path"
 	"path/filepath"
+	"strings"
 	"text/template"
 	"time"
 
@@ -23,7 +28,6 @@ import (
 	"github.com/docker/docker/client"
 	"github.com/kelseyhightower/envconfig"
 	"github.com/leekchan/timeutil"
-	"github.com/m90/targz"
 	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
 	"github.com/otiai10/copy"
@@ -38,6 +42,8 @@ type script struct {
 	cli          *client.Client
 	minioClient  *minio.Client
 	webdavClient *gowebdav.Client
+	sshClient    *ssh.Client
+	sftpClient   *sftp.Client
 	logger       *logrus.Logger
 	sender       *router.ServiceRouter
 	template     *template.Template
@@ -47,6 +53,8 @@ type script struct {
 	file  string
 	stats *Stats
 
+	encounteredLock bool
+
 	c *Config
 }
@@ -144,6 +152,66 @@ func newScript() (*script, error) {
 		} else {
 			webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword)
 			s.webdavClient = webdavClient
+			if s.c.WebdavUrlInsecure {
+				defaultTransport, ok := http.DefaultTransport.(*http.Transport)
+				if !ok {
+					return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport")
+				}
+				webdavTransport := defaultTransport.Clone()
+				webdavTransport.TLSClientConfig.InsecureSkipVerify = s.c.WebdavUrlInsecure
+				s.webdavClient.SetTransport(webdavTransport)
+			}
+		}
+	}
+
+	if s.c.SSHHostName != "" {
+		var authMethods []ssh.AuthMethod
+
+		if s.c.SSHPassword != "" {
+			authMethods = append(authMethods, ssh.Password(s.c.SSHPassword))
+		}
+
+		if _, err := os.Stat(s.c.SSHIdentityFile); err == nil {
+			key, err := ioutil.ReadFile(s.c.SSHIdentityFile)
+			if err != nil {
+				return nil, errors.New("newScript: error reading the private key")
+			}
+
+			var signer ssh.Signer
+			if s.c.SSHIdentityPassphrase != "" {
+				signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(s.c.SSHIdentityPassphrase))
+				if err != nil {
+					return nil, errors.New("newScript: error parsing the encrypted private key")
+				}
+				authMethods = append(authMethods, ssh.PublicKeys(signer))
+			} else {
+				signer, err = ssh.ParsePrivateKey(key)
+				if err != nil {
+					return nil, errors.New("newScript: error parsing the private key")
+				}
+				authMethods = append(authMethods, ssh.PublicKeys(signer))
+			}
+		}
+
+		sshClientConfig := &ssh.ClientConfig{
+			User:            s.c.SSHUser,
+			Auth:            authMethods,
+			HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+		}
+		sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", s.c.SSHHostName, s.c.SSHPort), sshClientConfig)
+		s.sshClient = sshClient
+		if err != nil {
+			return nil, fmt.Errorf("newScript: error creating ssh client: %w", err)
+		}
+		_, _, err = s.sshClient.SendRequest("keepalive", false, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		sftpClient, err := sftp.NewClient(sshClient)
+		s.sftpClient = sftpClient
+		if err != nil {
+			return nil, fmt.Errorf("newScript: error creating sftp client: %w", err)
 		}
 	}
 
@@ -213,6 +281,22 @@ func newScript() (*script, error) {
 	return s, nil
 }
 
+func (s *script) runCommands() (func() error, error) {
+	if s.cli == nil {
+		return noop, nil
+	}
+
+	if err := s.runLabeledCommands("docker-volume-backup.exec-pre"); err != nil {
+		return noop, fmt.Errorf("runCommands: error running pre commands: %w", err)
+	}
+	return func() error {
+		if err := s.runLabeledCommands("docker-volume-backup.exec-post"); err != nil {
+			return fmt.Errorf("runCommands: error running post commands: %w", err)
+		}
+		return nil
+	}, nil
+}
+
 // stopContainers stops all Docker containers that are marked as to being
 // stopped during the backup and returns a function that can be called to
 // restart everything that has been stopped.
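Worth flagging in the newScript hunk above: `ssh.InsecureIgnoreHostKey()` accepts any host key, so the connection is not protected against man-in-the-middle attacks. That is what the commit ships; purely as a hypothetical alternative (not part of this change), host keys could be pinned with the `knownhosts` helper from the same `golang.org/x/crypto` module:

// Hypothetical sketch: pin host keys via a known_hosts file instead of
// ssh.InsecureIgnoreHostKey(). The path and signature here are illustrative.
package main

import (
	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

func pinnedSSHConfig(user string, authMethods []ssh.AuthMethod) (*ssh.ClientConfig, error) {
	// knownhosts.New returns a HostKeyCallback that rejects servers whose
	// key is not listed in the given file.
	hostKeyCallback, err := knownhosts.New("/root/.ssh/known_hosts")
	if err != nil {
		return nil, err
	}
	return &ssh.ClientConfig{
		User:            user,
		Auth:            authMethods,
		HostKeyCallback: hostKeyCallback,
	}, nil
}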
@@ -338,6 +422,12 @@ func (s *script) takeBackup() error {
 	backupSources := s.c.BackupSources
 
 	if s.c.BackupFromSnapshot {
+		s.logger.Warn(
+			"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
+		)
+		s.logger.Warn(
+			"Please use `exec-pre` and `exec-post` commands to prepare your backup sources. Refer to the README for an upgrade guide.",
+		)
 		backupSources = filepath.Join("/tmp", s.c.BackupSources)
 		// copy before compressing guard against a situation where backup folder's content are still growing.
 		s.registerHook(hookLevelPlumbing, func(error) error {
@@ -364,7 +454,28 @@ func (s *script) takeBackup() error {
 		s.logger.Infof("Removed tar file `%s`.", tarFile)
 		return nil
 	})
-	if err := targz.Compress(backupSources, tarFile); err != nil {
+
+	backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
+	if err != nil {
+		return fmt.Errorf("takeBackup: error getting absolute path: %w", err)
+	}
+
+	var filesEligibleForBackup []string
+	if err := filepath.WalkDir(backupPath, func(path string, di fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if s.c.BackupExcludeRegexp.Re != nil && s.c.BackupExcludeRegexp.Re.MatchString(path) {
+			return nil
+		}
+		filesEligibleForBackup = append(filesEligibleForBackup, path)
+		return nil
+	}); err != nil {
+		return fmt.Errorf("compress: error walking filesystem tree: %w", err)
+	}
+
+	if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
 		return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
 	}
 
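The `filepath.WalkDir` pass above is what backs the new `BACKUP_EXCLUDE_REGEXP` option: any path matching the compiled expression is dropped from the list before the archive is created. A runnable illustration using the pattern the `test/ignore` case further down configures:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as BACKUP_EXCLUDE_REGEXP in test/ignore/docker-compose.yml
	// ($$ there is compose-file escaping for a single $).
	re := regexp.MustCompile(`\.(me|you)$`)
	fmt.Println(re.MatchString("/backup/data/skip.me")) // true  -> excluded
	fmt.Println(re.MatchString("/backup/data/me.txt"))  // false -> kept
}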
@@ -457,6 +568,52 @@ func (s *script) copyBackup() error {
 		s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath)
 	}
 
+	if s.sshClient != nil {
+		source, err := os.Open(s.file)
+		if err != nil {
+			return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
+		}
+		defer source.Close()
+
+		destination, err := s.sftpClient.Create(filepath.Join(s.c.SSHRemotePath, name))
+		if err != nil {
+			return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err)
+		}
+		defer destination.Close()
+
+		chunk := make([]byte, 1000000)
+		for {
+			num, err := source.Read(chunk)
+			if err == io.EOF {
+				tot, err := destination.Write(chunk[:num])
+				if err != nil {
+					return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
+				}
+
+				if tot != len(chunk[:num]) {
+					return fmt.Errorf("sshClient: failed to write stream")
+				}
+
+				break
+			}
+
+			if err != nil {
+				return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
+			}
+
+			tot, err := destination.Write(chunk[:num])
+			if err != nil {
+				return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
+			}
+
+			if tot != len(chunk[:num]) {
+				return fmt.Errorf("sshClient: failed to write stream")
+			}
+		}
+
+		s.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", s.file, s.c.SSHHostName, s.c.SSHRemotePath)
+	}
+
 	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
 		if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil {
 			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
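The chunked loop above is a manual stream copy with an explicit short-write check. For comparison only (not what the commit does), the standard library expresses the same transfer in one call and already reports short writes as `io.ErrShortWrite`:

// Sketch: equivalent upload using io.Copy on the same source/destination.
if _, err := io.Copy(destination, source); err != nil {
	return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
}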
@@ -512,7 +669,8 @@ func (s *script) pruneBackups() error {
 	if s.minioClient != nil {
 		candidates := s.minioClient.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{
 			WithMetadata: true,
-			Prefix:       s.c.BackupPruningPrefix,
+			Prefix:       filepath.Join(s.c.AwsS3Path, s.c.BackupPruningPrefix),
+			Recursive:    true,
 		})
 
 		var matches []minio.ObjectInfo
@@ -565,6 +723,9 @@ func (s *script) pruneBackups() error {
 		var matches []fs.FileInfo
 		var lenCandidates int
 		for _, candidate := range candidates {
+			if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) {
+				continue
+			}
 			lenCandidates++
 			if candidate.ModTime().Before(deadline) {
 				matches = append(matches, candidate)
@@ -586,6 +747,37 @@ func (s *script) pruneBackups() error {
 		})
 	}
 
+	if s.sshClient != nil {
+		candidates, err := s.sftpClient.ReadDir(s.c.SSHRemotePath)
+		if err != nil {
+			return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err)
+		}
+
+		var matches []string
+		for _, candidate := range candidates {
+			if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) {
+				continue
+			}
+			if candidate.ModTime().Before(deadline) {
+				matches = append(matches, candidate.Name())
+			}
+		}
+
+		s.stats.Storages.SSH = StorageStats{
+			Total:  uint(len(candidates)),
+			Pruned: uint(len(matches)),
+		}
+
+		doPrune(len(matches), len(candidates), "SSH backup(s)", func() error {
+			for _, match := range matches {
+				if err := s.sftpClient.Remove(filepath.Join(s.c.SSHRemotePath, match)); err != nil {
+					return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err)
+				}
+			}
+			return nil
+		})
+	}
+
 	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
 		globPattern := path.Join(
 			s.c.BackupArchive,
@@ -30,10 +30,11 @@ type StorageStats struct {
 	PruneErrors uint
 }
 
-// StoragesStats stats about each possible archival location (Local, WebDAV, S3)
+// StoragesStats stats about each possible archival location (Local, WebDAV, SSH, S3)
 type StoragesStats struct {
 	Local  StorageStats
 	WebDAV StorageStats
+	SSH    StorageStats
 	S3     StorageStats
 }
 
@@ -42,6 +43,7 @@ type Stats struct {
 	StartTime  time.Time
 	EndTime    time.Time
 	TookTime   time.Duration
+	LockedTime time.Duration
 	LogOutput  *bytes.Buffer
 	Containers ContainersStats
 	BackupFile BackupFileStats
@@ -10,27 +10,10 @@ import (
 	"io"
 	"os"
 	"strings"
-
-	"github.com/gofrs/flock"
 )
 
 var noop = func() error { return nil }
 
-// lock opens a lockfile at the given location, keeping it locked until the
-// caller invokes the returned release func. When invoked while the file is
-// still locked the function panics.
-func lock(lockfile string) func() error {
-	fileLock := flock.New(lockfile)
-	acquired, err := fileLock.TryLock()
-	if err != nil {
-		panic(err)
-	}
-	if !acquired {
-		panic("unable to acquire file lock")
-	}
-	return fileLock.Unlock
-}
-
 // copy creates a copy of the file located at `dst` at `src`.
 func copyFile(src, dst string) error {
 	in, err := os.Open(src)
@@ -13,6 +13,7 @@ Here is a list of all data passed to the template:
 * `StartTime`: time when the script started execution
 * `EndTime`: time when the backup has completed successfully (after pruning)
 * `TookTime`: amount of time it took for the backup to run. (equal to `EndTime - StartTime`)
+* `LockedTime`: amount of time it took for the backup to acquire the exclusive lock
 * `LogOutput`: full log of the application
 * `Containers`: object containing stats about the docker containers
   * `All`: total number of containers
@@ -24,7 +25,7 @@ Here is a list of all data passed to the template:
   * `FullPath`: full path of the backup file (e.g. `/archive/backup-2022-02-11T01-00-00.tar.gz`)
   * `Size`: size in bytes of the backup file
 * `Storages`: object that holds stats about each storage
-  * `Local`, `S3` or `WebDAV`:
+  * `Local`, `S3`, `WebDAV` or `SSH`:
     * `Total`: total number of backup files
     * `Pruned`: number of backup files that were deleted due to pruning rule
     * `PruneErrors`: number of backup files that were unable to be pruned
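With `LockedTime` and the `SSH` storage added above, custom notification templates can reference both. A small self-contained Go illustration of how such fields render through `text/template` (the struct and values here are made up for the example; the real payload is assembled by the tool as documented in the README):

package main

import (
	"os"
	"text/template"
	"time"
)

type StorageStats struct{ Total, Pruned, PruneErrors uint }

func main() {
	data := struct {
		LockedTime time.Duration
		Storages   struct{ SSH StorageStats }
	}{LockedTime: 2 * time.Second}
	data.Storages.SSH = StorageStats{Total: 3, Pruned: 1}

	tmpl := template.Must(template.New("body").Parse(
		"Waited {{ .LockedTime }} for the lock; SSH storage holds {{ .Storages.SSH.Total }} file(s), {{ .Storages.SSH.Pruned }} pruned.\n"))
	tmpl.Execute(os.Stdout, data)
}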
@@ -5,10 +5,21 @@
 
 set -e
 
-BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
+if [ ! -d "/etc/dockervolumebackup/conf.d" ]; then
+  BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
 
-echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
-echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
+  echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
+  echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
+else
+  echo "/etc/dockervolumebackup/conf.d was found, using configuration files from this directory."
+
+  for file in /etc/dockervolumebackup/conf.d/*; do
+    source $file
+    BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
+    echo "Appending cron.d entry with expression $BACKUP_CRON_EXPRESSION and configuration file $file"
+    (crontab -l; echo "$BACKUP_CRON_EXPRESSION /bin/sh -c 'set -a; source $file; set +a && backup' 2>&1") | crontab -
+  done
+fi
 
 echo "Starting cron in foreground."
 crond -f -l 8
38	go.mod
@@ -1,24 +1,26 @@
 module github.com/offen/docker-volume-backup
 
-go 1.17
+go 1.18
 
 require (
 	github.com/containrrr/shoutrrr v0.5.2
+	github.com/cosiner/argv v0.1.0
 	github.com/docker/docker v20.10.11+incompatible
 	github.com/gofrs/flock v0.8.1
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
-	github.com/m90/targz v0.0.0-20220208141135-d3baeef59a97
 	github.com/minio/minio-go/v7 v7.0.16
 	github.com/otiai10/copy v1.7.0
+	github.com/pkg/sftp v1.13.5
 	github.com/sirupsen/logrus v1.8.1
-	github.com/studio-b12/gowebdav v0.0.0-20211109083228-3f8721cd4b6f
-	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
+	github.com/studio-b12/gowebdav v0.0.0-20220128162035-c7b1ff8a5e62
+	golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
+	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
 )
 
 require (
-	github.com/Microsoft/go-winio v0.4.17 // indirect
-	github.com/containerd/containerd v1.5.5 // indirect
+	github.com/Microsoft/go-winio v0.5.2 // indirect
+	github.com/containerd/containerd v1.6.6 // indirect
 	github.com/docker/distribution v2.7.1+incompatible // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
@@ -26,33 +28,39 @@ require (
 	github.com/fatih/color v1.10.0 // indirect
 	github.com/fsnotify/fsnotify v1.4.9 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.0 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/uuid v1.3.0 // indirect
+	github.com/gorilla/mux v1.7.3 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.13.6 // indirect
+	github.com/klauspost/compress v1.15.6 // indirect
 	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+	github.com/kr/fs v0.1.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
 	github.com/mattn/go-colorable v0.1.8 // indirect
 	github.com/mattn/go-isatty v0.0.12 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
 	github.com/minio/sha256-simd v1.0.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
+	github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
 	github.com/nxadm/tail v1.4.6 // indirect
 	github.com/onsi/ginkgo v1.14.2 // indirect
 	github.com/onsi/gomega v1.10.3 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.1 // indirect
+	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/rs/xid v1.3.0 // indirect
-	golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect
-	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
-	golang.org/x/text v0.3.6 // indirect
+	golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect
+	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
+	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
-	google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect
-	google.golang.org/grpc v1.33.2 // indirect
-	google.golang.org/protobuf v1.26.0 // indirect
+	google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 // indirect
+	google.golang.org/grpc v1.47.0 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
 	gopkg.in/ini.v1 v1.65.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
@@ -44,7 +44,7 @@ docker run --rm \
   --env BACKUP_FILENAME=test.tar.gz \
   --env "BACKUP_FROM_SNAPSHOT=true" \
   --entrypoint backup \
-  offen/docker-volume-backup:$TEST_VERSION
+  offen/docker-volume-backup:${TEST_VERSION:-canary}
 
 docker run --rm -it \
   -v backup_data:/data alpine \
1	test/commands/.gitignore	vendored	Normal file
@@ -0,0 +1 @@
+local
36	test/commands/docker-compose.yml	Normal file
@@ -0,0 +1,36 @@
+version: '3.8'
+
+services:
+  database:
+    image: mariadb:10.7
+    deploy:
+      restart_policy:
+        condition: on-failure
+    environment:
+      MARIADB_ROOT_PASSWORD: test
+      MARIADB_DATABASE: backup
+    labels:
+      - docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump -ptest --all-databases > /tmp/volume/dump.sql'
+      - docker-volume-backup.exec-post=/bin/sh -c 'echo "post" > /tmp/volume/post.txt'
+      - docker-volume-backup.exec-label=test
+    volumes:
+      - app_data:/tmp/volume
+
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    deploy:
+      restart_policy:
+        condition: on-failure
+    environment:
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      EXEC_LABEL: test
+      EXEC_FORWARD_OUTPUT: "true"
+    volumes:
+      - archive:/archive
+      - app_data:/backup/data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+volumes:
+  app_data:
+  archive:
62	test/commands/run.sh	Normal file
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+set -e
+
+cd $(dirname $0)
+
+
+docker-compose up -d
+sleep 30 # mariadb likes to take a bit before responding
+
+docker-compose exec backup backup
+sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' commands_archive) ./local
+
+tar -xvf ./local/test.tar.gz
+if [ ! -f ./backup/data/dump.sql ]; then
+  echo "[TEST:FAIL] Could not find file written by pre command."
+  exit 1
+fi
+echo "[TEST:PASS] Found expected file."
+
+if [ -f ./backup/data/post.txt ]; then
+  echo "[TEST:FAIL] File created in post command was present in backup."
+  exit 1
+fi
+echo "[TEST:PASS] Did not find unexpected file."
+
+docker-compose down --volumes
+sudo rm -rf ./local
+
+
+echo "[TEST:INFO] Running commands test in swarm mode next."
+
+docker swarm init
+
+docker stack deploy --compose-file=docker-compose.yml test_stack
+
+while [ -z $(docker ps -q -f name=backup) ]; do
+  echo "[TEST:INFO] Backup container not ready yet. Retrying."
+  sleep 1
+done
+
+sleep 20
+
+docker exec $(docker ps -q -f name=backup) backup
+
+sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' test_stack_archive) ./local
+
+tar -xvf ./local/test.tar.gz
+if [ ! -f ./backup/data/dump.sql ]; then
+  echo "[TEST:FAIL] Could not find file written by pre command."
+  exit 1
+fi
+echo "[TEST:PASS] Found expected file."
+
+if [ -f ./backup/data/post.txt ]; then
+  echo "[TEST:FAIL] File created in post command was present in backup."
+  exit 1
+fi
+echo "[TEST:PASS] Did not find unexpected file."
+
+docker stack rm test_stack
+docker swarm leave --force
@@ -21,12 +21,24 @@ services:
     volumes:
       - webdav_backup_data:/var/lib/dav
 
-  backup: &default_backup_service
-    image: offen/docker-volume-backup:${TEST_VERSION}
+  ssh:
+    image: linuxserver/openssh-server:version-8.6_p1-r3
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - USER_NAME=test
+    volumes:
+      - ./id_rsa.pub:/config/.ssh/authorized_keys
+      - ssh_backup_data:/tmp
+      - ssh_config:/config
+
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
     hostname: hostnametoken
     depends_on:
       - minio
       - webdav
+      - ssh
     restart: always
     environment:
       AWS_ACCESS_KEY_ID: test
@@ -43,11 +55,18 @@ services:
       BACKUP_PRUNING_PREFIX: test
       GPG_PASSPHRASE: 1234secret
       WEBDAV_URL: http://webdav/
+      WEBDAV_URL_INSECURE: 'true'
       WEBDAV_PATH: /my/new/path/
       WEBDAV_USERNAME: test
      WEBDAV_PASSWORD: test
+      SSH_HOST_NAME: ssh
+      SSH_PORT: 2222
+      SSH_USER: test
+      SSH_REMOTE_PATH: /tmp
+      SSH_IDENTITY_PASSPHRASE: test1234
     volumes:
       - ./local:/archive
+      - ./id_rsa:/root/.ssh/id_rsa
       - app_data:/backup/app_data:ro
       - /var/run/docker.sock:/var/run/docker.sock
 
@@ -61,4 +80,6 @@ services:
 volumes:
   minio_backup_data:
   webdav_backup_data:
+  ssh_backup_data:
+  ssh_config:
   app_data:
@@ -2,9 +2,10 @@
 
 set -e
 
-cd $(dirname $0)
+cd "$(dirname "$0")"
 
 mkdir -p local
+ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
 
 docker-compose up -d
 sleep 5
@@ -15,7 +16,7 @@ docker-compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
 docker-compose exec backup backup
 
 sleep 5
-if [ "$(docker-compose ps -q | wc -l)" != "4" ]; then
+if [ "$(docker-compose ps -q | wc -l)" != "5" ]; then
   echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
   docker-compose ps
   exit 1
@@ -25,10 +26,12 @@ echo "[TEST:PASS] All containers running post backup."
 
 docker run --rm -it \
   -v compose_minio_backup_data:/minio_data \
-  -v compose_webdav_backup_data:/webdav_data alpine \
+  -v compose_webdav_backup_data:/webdav_data \
+  -v compose_ssh_backup_data:/ssh_data alpine \
   ash -c 'apk add gnupg && \
-    echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /minio_data/backup/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db && \
-    echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /webdav_data/data/my/new/path/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
+    echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /minio_data/backup/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db && \
+    echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /webdav_data/data/my/new/path/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db && \
+    echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /ssh_data/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
 
 echo "[TEST:PASS] Found relevant files in decrypted and untared remote backups."
 
@@ -52,9 +55,11 @@ docker-compose exec backup backup
 
 docker run --rm -it \
   -v compose_minio_backup_data:/minio_data \
-  -v compose_webdav_backup_data:/webdav_data alpine \
+  -v compose_webdav_backup_data:/webdav_data \
+  -v compose_ssh_backup_data:/ssh_data alpine \
   ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ] && \
-    [ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'
+    [ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ] && \
+    [ $(find /ssh_data/ -type f | wc -l) = "1" ]'
 
 echo "[TEST:PASS] Remote backups have not been deleted."
 
@@ -66,3 +71,4 @@ fi
 echo "[TEST:PASS] Local backups have not been deleted."
 
 docker-compose down --volumes
+rm -f id_rsa id_rsa.pub
1	test/confd/.gitignore	vendored	Normal file
@@ -0,0 +1 @@
+local
2	test/confd/01backup.env	Normal file
@@ -0,0 +1,2 @@
+BACKUP_FILENAME="conf.tar.gz"
+BACKUP_CRON_EXPRESSION="*/1 * * * *"
2	test/confd/02backup.env	Normal file
@@ -0,0 +1,2 @@
+BACKUP_FILENAME="other.tar.gz"
+BACKUP_CRON_EXPRESSION="*/1 * * * *"
2	test/confd/03never.env	Normal file
@@ -0,0 +1,2 @@
+BACKUP_FILENAME="never.tar.gz"
+BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?"
23	test/confd/docker-compose.yml	Normal file
@@ -0,0 +1,23 @@
+version: '3'
+
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    restart: always
+    volumes:
+      - ./local:/archive
+      - app_data:/backup/app_data:ro
+      - ./01backup.env:/etc/dockervolumebackup/conf.d/01backup.env
+      - ./02backup.env:/etc/dockervolumebackup/conf.d/02backup.env
+      - ./03never.env:/etc/dockervolumebackup/conf.d/03never.env
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
32	test/confd/run.sh	Executable file
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+set -e
+
+cd $(dirname $0)
+
+mkdir -p local
+
+docker-compose up -d
+
+# sleep until a backup is guaranteed to have happened on the 1 minute schedule
+sleep 100
+
+docker-compose down --volumes
+
+if [ ! -f ./local/conf.tar.gz ]; then
+  echo "[TEST:FAIL] Config from file was not used."
+  exit 1
+fi
+echo "[TEST:PASS] Config from file was used."
+
+if [ ! -f ./local/other.tar.gz ]; then
+  echo "[TEST:FAIL] Run on same schedule did not succeed."
+  exit 1
+fi
+echo "[TEST:PASS] Run on same schedule succeeded."
+
+if [ -f ./local/never.tar.gz ]; then
+  echo "[TEST:FAIL] Unexpected file was found."
+  exit 1
+fi
+echo "[TEST:PASS] Unexpected cron did not run."
1	test/ignore/.gitignore	vendored	Normal file
@@ -0,0 +1 @@
+local
15	test/ignore/docker-compose.yml	Normal file
@@ -0,0 +1,15 @@
+version: '3.8'
+
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    deploy:
+      restart_policy:
+        condition: on-failure
+    environment:
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_EXCLUDE_REGEXP: '\.(me|you)$$'
+    volumes:
+      - ./local:/archive
+      - ./sources:/backup/data:ro
27	test/ignore/run.sh	Normal file
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -e
+
+cd $(dirname $0)
+mkdir -p local
+
+docker-compose up -d
+sleep 5
+docker-compose exec backup backup
+
+docker-compose down --volumes
+
+out=$(mktemp -d)
+sudo tar --same-owner -xvf ./local/test.tar.gz -C "$out"
+
+if [ ! -f "$out/backup/data/me.txt" ]; then
+  echo "[TEST:FAIL] Expected file was not found."
+  exit 1
+fi
+echo "[TEST:PASS] Expected file was found."
+
+if [ -f "$out/backup/data/skip.me" ]; then
+  echo "[TEST:FAIL] Ignored file was found."
+  exit 1
+fi
+echo "[TEST:PASS] Ignored file was not found."
0	test/ignore/sources/me.txt	Normal file
0	test/ignore/sources/skip.me	Normal file
@@ -1,8 +1,8 @@
 version: '3'
 
 services:
-  backup: &default_backup_service
-    image: offen/docker-volume-backup:${TEST_VERSION}
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
     restart: always
     environment:
       BACKUP_FILENAME: test.tar.gz
1	test/ownership/.gitignore	vendored	Normal file
@@ -0,0 +1 @@
+local
27	test/ownership/docker-compose.yml	Normal file
@@ -0,0 +1,27 @@
+version: '3'
+
+services:
+  db:
+    image: postgres:14-alpine
+    restart: unless-stopped
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - postgres_data:/var/lib/postgresql/data
+    environment:
+      - POSTGRES_PASSWORD=1FHJMSwt0yhIN1zS7I4DilGUhThBKq0x
+      - POSTGRES_USER=test
+      - POSTGRES_DB=test
+
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION}
+    restart: always
+    environment:
+      BACKUP_FILENAME: backup.tar.gz
+    volumes:
+      - postgres_data:/backup/postgres:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+      - ./local:/archive
+
+volumes:
+  postgres_data:
28	test/ownership/run.sh	Normal file
@@ -0,0 +1,28 @@
+#!/bin/sh
+# This test refers to https://github.com/offen/docker-volume-backup/issues/71
+
+set -e
+
+cd $(dirname $0)
+
+mkdir -p local
+
+docker-compose up -d
+sleep 5
+
+docker-compose exec backup backup
+
+sudo tar --same-owner -xvf ./local/backup.tar.gz -C /tmp
+
+sudo find /tmp/backup/postgres > /dev/null
+echo "[TEST:PASS] Backup contains files at expected location"
+
+for file in $(sudo find /tmp/backup/postgres); do
+  if [ "$(sudo stat -c '%u:%g' $file)" != "70:70" ]; then
+    echo "[TEST:FAIL] Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
+    exit 1
+  fi
+done
+echo "[TEST:PASS] All files and directories in backup preserved their ownership."
+
+docker-compose down --volumes
@@ -18,8 +18,8 @@ services:
     volumes:
       - backup_data:/data
 
-  backup: &default_backup_service
-    image: offen/docker-volume-backup:${TEST_VERSION}
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
     depends_on:
       - minio
     deploy:
@@ -43,13 +43,15 @@ services:
     image: offen/offen:latest
     labels:
       - docker-volume-backup.stop-during-backup=true
+    healthcheck:
+      disable: true
     deploy:
       replicas: 2
       restart_policy:
         condition: on-failure
 
   pg:
-    image: postgres:12.2-alpine
+    image: postgres:14-alpine
    environment:
       POSTGRES_PASSWORD: example
     labels:
|
|||||||
Reference in New Issue
Block a user