Mirror of https://github.com/offen/docker-volume-backup.git
Synced 2025-12-05 17:18:02 +01:00

Compare commits (24 commits)
b441cf3e2b
82f66565da
d68814be9d
3661a4b49b
e738bd0539
342ae5910e
c2a8cc92fc
1892d56ff6
0b205fe6dc
8c8a2fa088
a850bf13fe
b52b271bac
cac5777e79
94a1edc4ad
a654097e59
1b1fc4856c
e81c34b8fc
9c23767fce
51af8c3c77
1ea0b51b23
da8c63f755
9bc8db0f7c
508bc07b4f
b8f71b04a1
.circleci/config.yml

@@ -5,6 +5,7 @@ jobs:
   machine:
     image: ubuntu-2004:202201-02
   working_directory: ~/docker-volume-backup
+  resource_class: large
   steps:
     - checkout
     - run:
@@ -29,6 +30,7 @@ jobs:
       DOCKER_BUILDKIT: '1'
       DOCKER_CLI_EXPERIMENTAL: enabled
     working_directory: ~/docker-volume-backup
+    resource_class: large
     steps:
       - checkout
       - setup_remote_docker:
@@ -48,6 +50,7 @@ jobs:
             if [[ "$CIRCLE_TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
               # prerelease tags like `v2.0.0-alpha.1` should not be released as `latest`
               tag_args="$tag_args -t offen/docker-volume-backup:latest"
+              tag_args="$tag_args -t offen/docker-volume-backup:$(echo "$CIRCLE_TAG" | cut -d. -f1)"
            fi
            docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
              $tag_args . --push
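The release step above only promotes proper semver tags: every build is tagged with its exact version, while stable tags additionally get `latest` and a new major-version alias (`v2` for `v2.19.0`, which is what `cut -d. -f1` extracts). A minimal Go sketch of that rule, with illustrative tag values:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// stableTag matches release tags like "v2.19.0" but not
// prerelease tags like "v2.0.0-alpha.1".
var stableTag = regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)

func tagsFor(circleTag string) []string {
	tags := []string{circleTag}
	if stableTag.MatchString(circleTag) {
		// mirrors `cut -d. -f1`: everything before the first dot
		tags = append(tags, "latest", strings.SplitN(circleTag, ".", 2)[0])
	}
	return tags
}

func main() {
	fmt.Println(tagsFor("v2.19.0"))        // [v2.19.0 latest v2]
	fmt.Println(tagsFor("v2.0.0-alpha.1")) // [v2.0.0-alpha.1]
}
```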
Dockerfile

@@ -1,7 +1,7 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0

-FROM golang:1.17-alpine as builder
+FROM golang:1.18-alpine as builder

 WORKDIR /app
 COPY go.mod go.sum ./
README.md (298 changed lines)

@@ -7,7 +7,7 @@
 Backup Docker volumes locally or to any S3 compatible storage.

 The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) sidecar container to an existing Docker setup.
-It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3 or WebDAV compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
+It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV or SSH compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.

 <!-- MarkdownTOC -->

@@ -20,20 +20,25 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
 - [Automatically pruning old backups](#automatically-pruning-old-backups)
 - [Send email notifications on failed backup runs](#send-email-notifications-on-failed-backup-runs)
 - [Customize notifications](#customize-notifications)
-- [Run custom commands before / after backup](#run-custom-commands-before--after-backup)
+- [Run custom commands during the backup lifecycle](#run-custom-commands-during-the-backup-lifecycle)
 - [Encrypting your backup using GPG](#encrypting-your-backup-using-gpg)
 - [Restoring a volume from a backup](#restoring-a-volume-from-a-backup)
 - [Set the timezone the container runs in](#set-the-timezone-the-container-runs-in)
 - [Using with Docker Swarm](#using-with-docker-swarm)
 - [Manually triggering a backup](#manually-triggering-a-backup)
 - [Update deprecated email configuration](#update-deprecated-email-configuration)
+- [Replace deprecated `BACKUP_FROM_SNAPSHOT` usage](#replace-deprecated-backup_from_snapshot-usage)
+- [Replace deprecated `exec-pre` and `exec-post` labels](#replace-deprecated-exec-pre-and-exec-post-labels)
 - [Using a custom Docker host](#using-a-custom-docker-host)
 - [Run multiple backup schedules in the same container](#run-multiple-backup-schedules-in-the-same-container)
+- [Define different retention schedules](#define-different-retention-schedules)
+- [Use special characters in notification URLs](#use-special-characters-in-notification-urls)
 - [Recipes](#recipes)
   - [Backing up to AWS S3](#backing-up-to-aws-s3)
   - [Backing up to Filebase](#backing-up-to-filebase)
   - [Backing up to MinIO](#backing-up-to-minio)
   - [Backing up to WebDAV](#backing-up-to-webdav)
+  - [Backing up to SSH](#backing-up-to-ssh)
   - [Backing up locally](#backing-up-locally)
   - [Backing up to AWS S3 as well as locally](#backing-up-to-aws-s3-as-well-as-locally)
   - [Running on a custom cron schedule](#running-on-a-custom-cron-schedule)
@@ -107,7 +112,7 @@ docker run --rm \
   --env AWS_SECRET_ACCESS_KEY="<xxx>" \
   --env AWS_S3_BUCKET_NAME="<xxx>" \
   --entrypoint backup \
-  offen/docker-volume-backup:latest
+  offen/docker-volume-backup:v2
 ```

 Alternatively, pass a `--env-file` in order to use a full config as described below.
@@ -149,6 +154,11 @@ You can populate below template according to your requirements and use it as you

 # BACKUP_LATEST_SYMLINK="backup.latest.tar.gz"

+# ************************************************************************
+# The BACKUP_FROM_SNAPSHOT option has been deprecated and will be removed
+# in the next major version. Please use exec-pre and exec-post
+# as documented below instead.
+# ************************************************************************
 # Whether to copy the content of backup folder before creating the tar archive.
 # In the rare scenario where the content of the source backup volume is continously
 # updating, but we do not wish to stop the container while performing the backup,
@@ -161,6 +171,12 @@ You can populate below template according to your requirements and use it as you

 # BACKUP_SOURCES="/other/location"

+# When given, all files in BACKUP_SOURCES whose full path matches the given
+# regular expression will be excluded from the archive. Regular Expressions
+# can be used as from the Go standard library https://pkg.go.dev/regexp
+
+# BACKUP_EXCLUDE_REGEXP="\.log$"
+
 ########### BACKUP STORAGE

 # The name of the remote bucket that should be used for storing backups. If
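The new `BACKUP_EXCLUDE_REGEXP` option is matched against each file's full path using Go's regexp syntax. A quick sketch of what the documented example `\.log$` would exclude (the paths are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	exclude := regexp.MustCompile(`\.log$`)
	for _, p := range []string{"/backup/data/app.log", "/backup/data/app.db"} {
		// only paths ending in ".log" match and would be left out of the archive
		fmt.Printf("%s excluded: %v\n", p, exclude.MatchString(p))
	}
}
```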
@@ -201,12 +217,17 @@ You can populate below template according to your requirements and use it as you
 # AWS_ENDPOINT_PROTO="https"

 # Setting this variable to `true` will disable verification of
-# SSL certificates. You shouldn't use this unless you use self-signed
-# certificates for your remote storage backend. This can only be used
-# when AWS_ENDPOINT_PROTO is set to `https`.
+# SSL certificates for AWS_ENDPOINT. You shouldn't use this unless you use
+# self-signed certificates for your remote storage backend. This can only be
+# used when AWS_ENDPOINT_PROTO is set to `https`.

 # AWS_ENDPOINT_INSECURE="true"

+# Setting this variable will change the S3 storage class header.
+# Defaults to "STANDARD", you can set this value according to your needs.
+
+# AWS_STORAGE_CLASS="GLACIER"
+
 # You can also backup files to any WebDAV server:

 # The URL of the remote WebDAV server
@@ -226,6 +247,46 @@ You can populate below template according to your requirements and use it as you

 # WEBDAV_PASSWORD="password"

+# Setting this variable to `true` will disable verification of
+# SSL certificates for WEBDAV_URL. You shouldn't use this unless you use
+# self-signed certificates for your remote storage backend.
+
+# WEBDAV_URL_INSECURE="true"
+
+# You can also backup files to any SSH server:
+
+# The URL of the remote SSH server
+
+# SSH_HOST_NAME="server.local"
+
+# The port of the remote SSH server
+# Optional variable default value is `22`
+
+# SSH_PORT=2222
+
+# The Directory to place the backups to on the SSH server.
+
+# SSH_REMOTE_PATH="/my/directory/"
+
+# The username for the SSH server
+
+# SSH_USER="user"
+
+# The password for the SSH server
+
+# SSH_PASSWORD="password"
+
+# The private key path in container for SSH server
+# Default value: /root/.ssh/id_rsa
+# If file is mounted to /root/.ssh/id_rsa path it will be used. Non-RSA keys will
+# also work.
+
+# SSH_IDENTITY_FILE="/root/.ssh/id_rsa"
+
+# The passphrase for the identity file
+
+# SSH_IDENTITY_PASSPHRASE="pass"
+
 # In addition to storing backups remotely, you can also keep local copies.
 # Pass a container-local path to store your backups if needed. You also need to
 # mount a local folder or Docker volume into that location (`/archive`
@@ -291,7 +352,7 @@ You can populate below template according to your requirements and use it as you

 # It is possible to define commands to be run in any container before and after
 # a backup is conducted. The commands themselves are defined in labels like
-# `docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump [options] > dump.sql'.
+# `docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump [options] > dump.sql'.
 # Several options exist for controlling this feature:

 # By default, any output of such a command is suppressed. If this value
@@ -335,6 +396,16 @@ You can populate below template according to your requirements and use it as you
 # DOCKER_HOST="tcp://docker_socket_proxy:2375"

+########### LOCK_TIMEOUT
+
+# In the case of overlapping cron schedules run by the same container,
+# subsequent invocations will wait for previous runs to finish before starting.
+# By default, this will time out and fail in case the lock could not be acquired
+# after 60 minutes. In case you need to adjust this timeout, supply a duration
+# value as per https://pkg.go.dev/time#ParseDuration to `LOCK_TIMEOUT`
+
+# LOCK_TIMEOUT="60m"
+
 ########### EMAIL NOTIFICATIONS

 # ************************************************************************
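`LOCK_TIMEOUT` takes any string Go's `time.ParseDuration` understands, so values like `90m` or `1h30m` are both valid. A minimal sketch (the value is an arbitrary example):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// the same parsing rules apply to the LOCK_TIMEOUT setting
	d, err := time.ParseDuration("1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1h30m0s
}
```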
@@ -366,7 +437,7 @@ You can populate below template according to your requirements and use it as you
 # EMAIL_SMTP_PORT="<port>"
 ```

-In case you encouter double quoted values in your configuration you might be running an [older version of `docker-compose`].
+In case you encouter double quoted values in your configuration you might be running an [older version of `docker-compose`][compose-issue].
 You can work around this by either updating `docker-compose` or unquoting your configuration values.

 [compose-issue]: https://github.com/docker/compose/issues/2854
@@ -391,7 +462,7 @@ services:
       - docker-volume-backup.stop-during-backup=service1

   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       BACKUP_STOP_CONTAINER_LABEL: service1
     volumes:
@@ -414,7 +485,7 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       BACKUP_FILENAME: backup-%Y-%m-%dT%H-%M-%S.tar.gz
       BACKUP_PRUNING_PREFIX: backup-
@@ -437,7 +508,7 @@ version: '3'

 services:
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       # ... other configuration values go here
       NOTIFICATION_URLS=smtp://me:secret@smtp.example.com:587/?fromAddress=no-reply@example.com&toAddresses=you@example.com
@@ -473,11 +544,16 @@ Overridable template names are: `title_success`, `body_success`, `title_failure`

 For a full list of available variables and functions, see [this page](https://github.com/offen/docker-volume-backup/blob/master/docs/NOTIFICATION-TEMPLATES.md).

-### Run custom commands before / after backup
+### Run custom commands during the backup lifecycle

 In certain scenarios it can be required to run specific commands before and after a backup is taken (e.g. dumping a database).
-When mounting the Docker socket into the `docker-volume-backup` container, you can define pre- and post-commands that will be run in the context of the target container.
+When mounting the Docker socket into the `docker-volume-backup` container, you can define pre- and post-commands that will be run in the context of the target container (it is also possible to run commands inside the `docker-volume-backup` container itself using this feature).
-Such commands are defined by specifying the command in a `docker-volume-backup.exec-[pre|post]` label.
+Such commands are defined by specifying the command in a `docker-volume-backup.[step]-[pre|post]` label where `step` can be any of the following phases of a backup lifecyle:
+
+- `archive` (the tar archive is created)
+- `process` (the tar archive is processed, e.g. encrypted - optional)
+- `copy` (the tar archive is copied to all configured storages)
+- `prune` (existing backups are pruned based on the defined ruleset - optional)

 Taking a database dump using `mysqldump` would look like this:

@@ -491,7 +567,7 @@ services:
     volumes:
       - backup_data:/tmp/backups
     labels:
-      - docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump --all-databases > /backups/dump.sql'
+      - docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump --all-databases > /backups/dump.sql'

 volumes:
   backup_data:
@@ -511,11 +587,11 @@ services:
     volumes:
       - backup_data:/tmp/backups
     labels:
-      - docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump --all-databases > /tmp/volume/dump.sql'
+      - docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump --all-databases > /tmp/volume/dump.sql'
       - docker-volume-backup.exec-label=database

   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       EXEC_LABEL: database
     volumes:
@@ -527,7 +603,7 @@ volumes:
 ```


-The backup procedure is guaranteed to wait for all `pre` commands to finish.
+The backup procedure is guaranteed to wait for all `pre` or `post` commands to finish before proceeding.
 However there are no guarantees about the order in which they are run, which could also happen concurrently.

 ### Encrypting your backup using GPG
@@ -592,7 +668,7 @@ version: '3'

 services:
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     volumes:
       - data:/backup/my-app-backup:ro
       - /etc/timezone:/etc/timezone:ro
@@ -615,7 +691,7 @@ When running in Swarm mode, it's also advised to set a hard memory limit on your
 ```yml
 services:
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     deployment:
       resources:
         limits:
@@ -650,6 +726,54 @@ After:
 NOTIFICATION_URLS=smtp://me:secret@posteo.de:587/?fromAddress=no-reply@example.com&toAddresses=you@example.com
 ```

+### Replace deprecated `BACKUP_FROM_SNAPSHOT` usage
+
+Starting with version 2.15.0, the `BACKUP_FROM_SNAPSHOT` feature has been deprecated.
+If you need to prepare your sources before the backup is taken, use `archive-pre`, `archive-post` and an intermediate volume:
+
+```yml
+version: '3'
+
+services:
+  my_app:
+    build: .
+    volumes:
+      - data:/var/my_app
+      - backup:/tmp/backup
+    labels:
+      - docker-volume-backup.archive-pre=cp -r /var/my_app /tmp/backup/my-app
+      - docker-volume-backup.archive-post=rm -rf /tmp/backup/my-app
+
+  backup:
+    image: offen/docker-volume-backup:latest
+    environment:
+      BACKUP_SOURCES: /tmp/backup
+    volumes:
+      - backup:/backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+  data:
+  backup:
+```
+
+### Replace deprecated `exec-pre` and `exec-post` labels
+
+Version 2.19.0 introduced the option to run labeled commands at multiple points in time during the backup lifecycle.
+In order to be able to use more obvious terminology in the new labels, the existing `exec-pre` and `exec-post` labels have been deprecated.
+If you want to emulate the existing behavior, all you need to do is change `exec-pre` to `archive-pre` and `exec-post` to `archive-post`:
+
+```diff
+  labels:
+- - docker-volume-backup.exec-pre=cp -r /var/my_app /tmp/backup/my-app
++ - docker-volume-backup.archive-pre=cp -r /var/my_app /tmp/backup/my-app
+- - docker-volume-backup.exec-post=rm -rf /tmp/backup/my-app
++ - docker-volume-backup.archive-post=rm -rf /tmp/backup/my-app
+```
+
+The `EXEC_LABEL` setting and the `docker-volume-backup.exec-label` label stay as is.
+Check the additional documentation on running commands during the backup lifecycle to find out about further possibilities.
+
 ### Using a custom Docker host

 If you are interfacing with Docker via TCP, set `DOCKER_HOST` to the correct URL.
@@ -669,7 +793,7 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     volumes:
       - data:/backup/my-app-backup:ro
       - /var/run/docker.sock:/var/run/docker.sock:ro
@@ -681,10 +805,79 @@ volumes:

 A separate cronjob will be created for each config file.
 If a configuration value is set both in the global environment as well as in the config file, the config file will take precedence.
-The `backup` command expects to run on an exclusive lock, so it is your responsibility to make sure the invocations do not overlap.
+The `backup` command expects to run on an exclusive lock, so in case you provide the same or overlapping schedules in your cron expressions, the runs will still be executed serially, one after the other.
+The exact order of schedules that use the same cron expression is not specified.
 In case you need your schedules to overlap, you need to create a dedicated container for each schedule instead.
 When changing the configuration, you currently need to manually restart the container for the changes to take effect.

+Set `BACKUP_SOURCES` for each config file to control which subset of volume mounts gets backed up:
+
+```yml
+# With a volume configuration like this:
+volumes:
+  - /var/run/docker.sock:/var/run/docker.sock:ro
+  - ./configuration:/etc/dockervolumebackup/conf.d
+  - app1_data:/backup/app1_data:ro
+  - app2_data:/backup/app2_data:ro
+```
+
+```ini
+# In the 1st config file:
+BACKUP_SOURCES=/backup/app1_data
+
+# In the 2nd config file:
+BACKUP_SOURCES=/backup/app2_data
+```
+
+### Define different retention schedules
+
+If you want to manage backup retention on different schedules, the most straight forward approach is to define a dedicated configuration for retention rule using a different prefix in the `BACKUP_FILENAME` parameter and then run them on different cron schedules.
+
+For example, if you wanted to keep daily backups for 7 days, weekly backups for a month, and retain monthly backups forever, you could create three configuration files and mount them into `/etc/dockervolumebackup.d`:
+
+```ini
+# 01daily.conf
+BACKUP_FILENAME="daily-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
+# run every day at 2am
+BACKUP_CRON_EXPRESSION="0 2 * * *"
+BACKUP_PRUNING_PREFIX="daily-backup-"
+BACKUP_RETENTION_DAYS="7"
+```
+
+```ini
+# 02weekly.conf
+BACKUP_FILENAME="weekly-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
+# run every monday at 3am
+BACKUP_CRON_EXPRESSION="0 3 * * 1"
+BACKUP_PRUNING_PREFIX="weekly-backup-"
+BACKUP_RETENTION_DAYS="31"
+```
+
+```ini
+# 03monthly.conf
+BACKUP_FILENAME="monthly-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
+# run every 1st of a month at 4am
+BACKUP_CRON_EXPRESSION="0 4 1 * *"
+```
+
+Note that while it's possible to define colliding cron schedules for each of these configurations, you might need to adjust the value for `LOCK_TIMEOUT` in case your backups are large and might take longer than an hour.
+
+### Use special characters in notification URLs
+
+The value given to `NOTIFICATION_URLS` is a comma separated list of URLs.
+If such a URL contains special characters (e.g. commas) it needs to be URL encoded.
+To get an encoded version of your URL, you can use the CLI tool provided by `shoutrrr` (which is the library used for sending notifications):
+
+```
+docker run --rm -ti containrrr/shoutrrr generate [service]
+```
+
+where service is any of the [supported services][shoutrrr-docs], e.g. for SMTP:
+
+```
+docker run --rm -ti containrrr/shoutrrr generate smtp
+```
+
 ## Recipes

 This section lists configuration for some real-world use cases that you can mix and match according to your needs.
@@ -697,9 +890,9 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
     volumes:
@@ -718,10 +911,10 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       AWS_ENDPOINT: s3.filebase.com
-      AWS_BUCKET_NAME: filebase-bucket
+      AWS_S3_BUCKET_NAME: filebase-bucket
       AWS_ACCESS_KEY_ID: FILEBASE-ACCESS-KEY
       AWS_SECRET_ACCESS_KEY: FILEBASE-SECRET-KEY
     volumes:
@@ -740,10 +933,10 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       AWS_ENDPOINT: minio.example.com
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: MINIOACCESSKEY
       AWS_SECRET_ACCESS_KEY: MINIOSECRETKEY
     volumes:
@@ -762,7 +955,7 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       WEBDAV_URL: https://webdav.mydomain.me
       WEBDAV_PATH: /my/directory/
@@ -776,6 +969,29 @@ volumes:
   data:
 ```

+### Backing up to SSH
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:v2
+    environment:
+      SSH_HOST_NAME: server.local
+      SSH_PORT: 2222
+      SSH_USER: user
+      SSH_REMOTE_PATH: /data
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+      - /path/to/private_key:/root/.ssh/id
+
+volumes:
+  data:
+```
+
 ### Backing up locally

 ```yml
@@ -784,7 +1000,7 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       BACKUP_FILENAME: backup-%Y-%m-%dT%H-%M-%S.tar.gz
       BACKUP_LATEST_SYMLINK: backup-latest.tar.gz
@@ -805,9 +1021,9 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
     volumes:
@@ -827,11 +1043,11 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       # take a backup on every hour
       BACKUP_CRON_EXPRESSION: "0 * * * *"
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
     volumes:
@@ -850,9 +1066,9 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
       BACKUP_FILENAME: backup-%Y-%m-%dT%H-%M-%S.tar.gz
@@ -874,9 +1090,9 @@ version: '3'
 services:
   # ... define other services using the `data` volume here
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
    environment:
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
       GPG_PASSPHRASE: somesecretstring
@@ -901,7 +1117,7 @@ services:
     volumes:
       - app_data:/tmp/dumps
   backup:
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment:
       BACKUP_FILENAME: db.tar.gz
       BACKUP_CRON_EXPRESSION: "0 2 * * *"
@@ -922,10 +1138,10 @@ version: '3'
 services:
   # ... define other services using the `data_1` and `data_2` volumes here
   backup_1: &backup_service
-    image: offen/docker-volume-backup:latest
+    image: offen/docker-volume-backup:v2
     environment: &backup_environment
       BACKUP_CRON_EXPRESSION: "0 2 * * *"
-      AWS_BUCKET_NAME: backup-bucket
+      AWS_S3_BUCKET_NAME: backup-bucket
       AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
       # Label the container using the `data_1` volume as `docker-volume-backup.stop-during-backup=service1`
cmd/backup/archive.go

@@ -11,14 +11,13 @@ import (
 	"compress/gzip"
 	"fmt"
 	"io"
-	"io/fs"
 	"os"
 	"path"
 	"path/filepath"
 	"strings"
 )

-func createArchive(inputFilePath, outputFilePath string) error {
+func createArchive(files []string, inputFilePath, outputFilePath string) error {
 	inputFilePath = stripTrailingSlashes(inputFilePath)
 	inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
 	if err != nil {
@@ -28,7 +27,7 @@ func createArchive(inputFilePath, outputFilePath string) error {
 		return fmt.Errorf("createArchive: error creating output file path: %w", err)
 	}

-	if err := compress(inputFilePath, outputFilePath, filepath.Dir(inputFilePath)); err != nil {
+	if err := compress(files, outputFilePath, filepath.Dir(inputFilePath)); err != nil {
 		return fmt.Errorf("createArchive: error creating archive: %w", err)
 	}

@@ -52,7 +51,7 @@ func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error)
 	return inputFilePath, outputFilePath, err
 }

-func compress(inPath, outFilePath, subPath string) error {
+func compress(paths []string, outFilePath, subPath string) error {
 	file, err := os.Create(outFilePath)
 	if err != nil {
 		return fmt.Errorf("compress: error creating out file: %w", err)
@@ -62,14 +61,6 @@ func compress(inPath, outFilePath, subPath string) error {
 	gzipWriter := gzip.NewWriter(file)
 	tarWriter := tar.NewWriter(gzipWriter)

-	var paths []string
-	if err := filepath.WalkDir(inPath, func(path string, di fs.DirEntry, err error) error {
-		paths = append(paths, path)
-		return err
-	}); err != nil {
-		return fmt.Errorf("compress: error walking filesystem tree: %w", err)
-	}
-
 	for _, p := range paths {
 		if err := writeTarGz(p, tarWriter, prefix); err != nil {
 			return fmt.Errorf("compress error writing %s to archive: %w", p, err)
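After this change, `compress` consumes a precomputed file list instead of walking the tree itself, which is what lets the caller filter paths with `BACKUP_EXCLUDE_REGEXP` before archiving. The helper below is an assumed sketch of such a caller-side walk, not the repo's actual code:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"regexp"
)

// collectFiles walks root and returns every path whose full path does not
// match exclude. Excluded directories are skipped entirely.
func collectFiles(root string, exclude *regexp.Regexp) ([]string, error) {
	var files []string
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if exclude != nil && exclude.MatchString(path) {
			if d.IsDir() {
				return filepath.SkipDir
			}
			return nil
		}
		files = append(files, path)
		return nil
	})
	return files, err
}

func main() {
	files, err := collectFiles(".", regexp.MustCompile(`\.log$`))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d files to archive\n", len(files))
}
```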
cmd/backup/config.go

@@ -3,7 +3,11 @@

 package main

-import "time"
+import (
+	"fmt"
+	"regexp"
+	"time"
+)

 // Config holds all configuration values that are expected to be set
 // by users.
@@ -18,11 +22,13 @@ type Config struct {
 	BackupPruningPrefix string `split_words:"true"`
 	BackupStopContainerLabel string `split_words:"true" default:"true"`
 	BackupFromSnapshot bool `split_words:"true"`
+	BackupExcludeRegexp RegexpDecoder `split_words:"true"`
 	AwsS3BucketName string `split_words:"true"`
 	AwsS3Path string `split_words:"true"`
 	AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"`
 	AwsEndpointProto string `split_words:"true" default:"https"`
 	AwsEndpointInsecure bool `split_words:"true"`
+	AwsStorageClass string `split_words:"true"`
 	AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
 	AwsSecretAccessKey string `split_words:"true"`
 	AwsIamRoleEndpoint string `split_words:"true"`
@@ -36,9 +42,34 @@ type Config struct {
 	EmailSMTPUsername string `envconfig:"EMAIL_SMTP_USERNAME"`
 	EmailSMTPPassword string `envconfig:"EMAIL_SMTP_PASSWORD"`
 	WebdavUrl string `split_words:"true"`
+	WebdavUrlInsecure bool `split_words:"true"`
 	WebdavPath string `split_words:"true" default:"/"`
 	WebdavUsername string `split_words:"true"`
 	WebdavPassword string `split_words:"true"`
+	SSHHostName string `split_words:"true"`
+	SSHPort string `split_words:"true" default:"22"`
+	SSHUser string `split_words:"true"`
+	SSHPassword string `split_words:"true"`
+	SSHIdentityFile string `split_words:"true" default:"/root/.ssh/id_rsa"`
+	SSHIdentityPassphrase string `split_words:"true"`
+	SSHRemotePath string `split_words:"true"`
 	ExecLabel string `split_words:"true"`
 	ExecForwardOutput bool `split_words:"true"`
+	LockTimeout time.Duration `split_words:"true" default:"60m"`
+}
+
+type RegexpDecoder struct {
+	Re *regexp.Regexp
+}
+
+func (r *RegexpDecoder) Decode(v string) error {
+	if v == "" {
+		return nil
+	}
+	re, err := regexp.Compile(v)
+	if err != nil {
+		return fmt.Errorf("config: error compiling given regexp `%s`: %w", v, err)
+	}
+	*r = RegexpDecoder{Re: re}
+	return nil
 }
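The `split_words` struct tags suggest configuration is parsed with `github.com/kelseyhightower/envconfig`; under that assumption, `RegexpDecoder` is filled in automatically because `*RegexpDecoder` satisfies envconfig's `Decoder` interface. A self-contained sketch of the mechanism (the `demoConfig` type is illustrative, `RegexpDecoder` is copied from the diff above):

```go
package main

import (
	"fmt"
	"os"
	"regexp"

	"github.com/kelseyhightower/envconfig"
)

type RegexpDecoder struct {
	Re *regexp.Regexp
}

// Decode is invoked by envconfig with the raw environment value.
func (r *RegexpDecoder) Decode(v string) error {
	if v == "" {
		return nil
	}
	re, err := regexp.Compile(v)
	if err != nil {
		return fmt.Errorf("config: error compiling given regexp `%s`: %w", v, err)
	}
	*r = RegexpDecoder{Re: re}
	return nil
}

type demoConfig struct {
	BackupExcludeRegexp RegexpDecoder `split_words:"true"`
}

func main() {
	os.Setenv("BACKUP_EXCLUDE_REGEXP", `\.log$`)
	var c demoConfig
	if err := envconfig.Process("", &c); err != nil {
		panic(err)
	}
	fmt.Println(c.BackupExcludeRegexp.Re.MatchString("/backup/app.log")) // true
}
```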
cmd/backup/exec.go

@@ -13,12 +13,12 @@ import (
 	"io/ioutil"
 	"os"
 	"strings"
-	"sync"

 	"github.com/cosiner/argv"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/pkg/stdcopy"
+	"golang.org/x/sync/errgroup"
 )

 func (s *script) exec(containerRef string, command string) ([]byte, []byte, error) {
@@ -93,33 +93,107 @@ func (s *script) runLabeledCommands(label string) error {
 		return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
 	}

+	var hasDeprecatedContainers bool
+	if label == "docker-volume-backup.archive-pre" {
+		f[0] = filters.KeyValuePair{
+			Key:   "label",
+			Value: "docker-volume-backup.exec-pre",
+		}
+		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+			Quiet:   true,
+			Filters: filters.NewArgs(f...),
+		})
+		if err != nil {
+			return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
+		}
+		if len(deprecatedContainers) != 0 {
+			hasDeprecatedContainers = true
+			containersWithCommand = append(containersWithCommand, deprecatedContainers...)
+		}
+	}
+
+	if label == "docker-volume-backup.archive-post" {
+		f[0] = filters.KeyValuePair{
+			Key:   "label",
+			Value: "docker-volume-backup.exec-post",
+		}
+		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+			Quiet:   true,
+			Filters: filters.NewArgs(f...),
+		})
+		if err != nil {
+			return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
+		}
+		if len(deprecatedContainers) != 0 {
+			hasDeprecatedContainers = true
+			containersWithCommand = append(containersWithCommand, deprecatedContainers...)
+		}
+	}
+
 	if len(containersWithCommand) == 0 {
 		return nil
 	}

-	wg := sync.WaitGroup{}
-	wg.Add(len(containersWithCommand))
+	if hasDeprecatedContainers {
+		s.logger.Warn(
+			"Using `docker-volume-backup.exec-pre` and `docker-volume-backup.exec-post` labels has been deprecated and will be removed in the next major version.",
+		)
+		s.logger.Warn(
+			"Please use other `-pre` and `-post` labels instead. Refer to the README for an upgrade guide.",
+		)
+	}
+
+	g := new(errgroup.Group)

-	var cmdErrors []error
 	for _, container := range containersWithCommand {
-		go func(c types.Container) {
-			cmd, _ := c.Labels[label]
+		c := container
+		g.Go(func() error {
+			cmd, ok := c.Labels[label]
+			if !ok && label == "docker-volume-backup.archive-pre" {
+				cmd, _ = c.Labels["docker-volume-backup.exec-pre"]
+			} else if !ok && label == "docker-volume-backup.archive-post" {
+				cmd, _ = c.Labels["docker-volume-backup.exec-post"]
+			}
+
 			s.logger.Infof("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/"))
 			stdout, stderr, err := s.exec(c.ID, cmd)
-			if err != nil {
-				cmdErrors = append(cmdErrors, err)
-			}
 			if s.c.ExecForwardOutput {
 				os.Stderr.Write(stderr)
 				os.Stdout.Write(stdout)
 			}
-			wg.Done()
-		}(container)
+			if err != nil {
+				return fmt.Errorf("runLabeledCommands: error executing command: %w", err)
+			}
+			return nil
+		})
 	}

-	wg.Wait()
-	if len(cmdErrors) != 0 {
-		return join(cmdErrors...)
+	if err := g.Wait(); err != nil {
+		return fmt.Errorf("runLabeledCommands: error from errgroup: %w", err)
 	}
 	return nil
 }
+
+type lifecyclePhase string
+
+const (
+	lifecyclePhaseArchive lifecyclePhase = "archive"
+	lifecyclePhaseProcess lifecyclePhase = "process"
+	lifecyclePhaseCopy    lifecyclePhase = "copy"
+	lifecyclePhasePrune   lifecyclePhase = "prune"
+)
+
+func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func() error {
+	if s.cli == nil {
+		return cb
+	}
+	return func() error {
+		if err := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
+			return fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
+		}
+		defer func() {
+			s.must(s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)))
+		}()
+		return cb()
+	}
}
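Switching from `sync.WaitGroup` plus a shared `cmdErrors` slice to `errgroup.Group` removes the manual bookkeeping (and the unsynchronized appends from multiple goroutines): `g.Wait()` both joins the goroutines and returns the first non-nil error. A standalone sketch with made-up container names:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g := new(errgroup.Group)
	for _, name := range []string{"db", "cache", "app"} {
		name := name // capture loop variable, as the patched code does with `c := container`
		g.Go(func() error {
			if name == "cache" {
				return fmt.Errorf("command failed in container %s", name)
			}
			return nil
		})
	}
	// Wait joins all goroutines and surfaces the first error.
	if err := g.Wait(); err != nil {
		fmt.Println(err) // command failed in container cache
	}
}
```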
cmd/backup/lock.go (new file, 58 lines)

@@ -0,0 +1,58 @@
+// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/gofrs/flock"
+)
+
+// lock opens a lockfile at the given location, keeping it locked until the
+// caller invokes the returned release func. In case the lock is currently blocked
+// by another execution, it will repeatedly retry until the lock is available
+// or the given timeout is exceeded.
+func (s *script) lock(lockfile string) (func() error, error) {
+	start := time.Now()
+	defer func() {
+		s.stats.LockedTime = time.Now().Sub(start)
+	}()
+
+	retry := time.NewTicker(5 * time.Second)
+	defer retry.Stop()
+	deadline := time.NewTimer(s.c.LockTimeout)
+	defer deadline.Stop()
+
+	fileLock := flock.New(lockfile)
+
+	for {
+		acquired, err := fileLock.TryLock()
+		if err != nil {
+			return noop, fmt.Errorf("lock: error trying lock: %w", err)
+		}
+		if acquired {
+			if s.encounteredLock {
+				s.logger.Info("Acquired exclusive lock on subsequent attempt, ready to continue.")
+			}
+			return fileLock.Unlock, nil
+		}
+
+		if !s.encounteredLock {
+			s.logger.Infof(
+				"Exclusive lock was not available on first attempt. Will retry until it becomes available or the timeout of %s is exceeded.",
+				s.c.LockTimeout,
+			)
+			s.encounteredLock = true
+		}
+
+		select {
+		case <-retry.C:
+			continue
+		case <-deadline.C:
+			return noop, errors.New("lock: timed out waiting for lockfile to become available")
+		}
+	}
+}
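The retry loop in `lock` combines a `time.Ticker` for polling with a `time.Timer` as the overall deadline. A standalone sketch of the same pattern, with illustrative intervals:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// acquire polls try on a ticker and gives up when the deadline timer fires.
func acquire(try func() bool, every, timeout time.Duration) error {
	retry := time.NewTicker(every)
	defer retry.Stop()
	deadline := time.NewTimer(timeout)
	defer deadline.Stop()
	for {
		if try() {
			return nil
		}
		select {
		case <-retry.C:
			continue
		case <-deadline.C:
			return errors.New("timed out waiting for lock")
		}
	}
}

func main() {
	attempts := 0
	// succeeds on the third attempt, well before the one-second deadline
	err := acquire(func() bool { attempts++; return attempts > 2 }, 10*time.Millisecond, time.Second)
	fmt.Println(err, "after", attempts, "attempts")
}
```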
cmd/backup/main.go

@@ -8,14 +8,15 @@ import (
 )

 func main() {
-	unlock := lock("/var/lock/dockervolumebackup.lock")
-	defer unlock()
-
 	s, err := newScript()
 	if err != nil {
 		panic(err)
 	}

+	unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
+	defer unlock()
+	s.must(err)
+
 	defer func() {
 		if pArg := recover(); pArg != nil {
 			if err, ok := pArg.(error); ok {
@@ -37,14 +38,7 @@ func main() {
 		s.logger.Info("Finished running backup tasks.")
 	}()

-	s.must(func() error {
-		runPostCommands, err := s.runCommands()
-		defer func() {
-			s.must(runPostCommands())
-		}()
-		if err != nil {
-			return err
-		}
+	s.must(s.withLabeledCommands(lifecyclePhaseArchive, func() error {
 		restartContainers, err := s.stopContainers()
 		// The mechanism for restarting containers is not using hooks as it
 		// should happen as soon as possible (i.e. before uploading backups or
@@ -55,10 +49,10 @@ func main() {
 		if err != nil {
 			return err
 		}
-		return s.takeBackup()
-	}())
+		return s.createArchive()
+	})())

-	s.must(s.encryptBackup())
-	s.must(s.copyBackup())
-	s.must(s.pruneBackups())
+	s.must(s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)())
+	s.must(s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)())
+	s.must(s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)())
 }
cmd/backup/notifications.go

@@ -7,6 +7,7 @@ import (
 	"bytes"
 	_ "embed"
 	"fmt"
+	"os"
 	"text/template"
 	"time"

@@ -82,6 +83,7 @@ var templateHelpers = template.FuncMap{
 	"formatBytesBin": func(bytes uint64) string {
 		return formatBytes(bytes, false)
 	},
+	"env": os.Getenv,
 }

 // formatBytes converts an amount of bytes in a human-readable representation
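Registering `os.Getenv` under the name `env` lets notification templates read arbitrary environment variables. A minimal sketch of the mechanism (the variable name is made up):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	os.Setenv("HOSTNAME_LABEL", "backup-host-1") // illustrative variable
	t := template.Must(template.New("title").Funcs(template.FuncMap{
		"env": os.Getenv, // same helper the patch registers
	}).Parse(`Backup on {{ env "HOSTNAME_LABEL" }} finished`))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// prints: Backup on backup-host-1 finished
}
```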
@@ -9,6 +9,8 @@ import (
 	"fmt"
 	"io"
 	"io/fs"
+	"io/ioutil"
+	"net/http"
 	"os"
 	"path"
 	"path/filepath"
@@ -27,9 +29,11 @@ import (
 	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
 	"github.com/otiai10/copy"
+	"github.com/pkg/sftp"
 	"github.com/sirupsen/logrus"
 	"github.com/studio-b12/gowebdav"
 	"golang.org/x/crypto/openpgp"
+	"golang.org/x/crypto/ssh"
 )

 // script holds all the stateful information required to orchestrate a
@@ -38,6 +42,8 @@ type script struct {
 	cli          *client.Client
 	minioClient  *minio.Client
 	webdavClient *gowebdav.Client
+	sshClient    *ssh.Client
+	sftpClient   *sftp.Client
 	logger       *logrus.Logger
 	sender       *router.ServiceRouter
 	template     *template.Template
@@ -47,6 +53,8 @@ type script struct {
 	file  string
 	stats *Stats

+	encounteredLock bool
+
 	c *Config
 }

@@ -144,6 +152,66 @@ func newScript() (*script, error) {
 	} else {
 		webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword)
 		s.webdavClient = webdavClient
+		if s.c.WebdavUrlInsecure {
+			defaultTransport, ok := http.DefaultTransport.(*http.Transport)
+			if !ok {
+				return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport")
+			}
+			webdavTransport := defaultTransport.Clone()
+			webdavTransport.TLSClientConfig.InsecureSkipVerify = s.c.WebdavUrlInsecure
+			s.webdavClient.SetTransport(webdavTransport)
+		}
 	}
 }

+if s.c.SSHHostName != "" {
+	var authMethods []ssh.AuthMethod
+
+	if s.c.SSHPassword != "" {
+		authMethods = append(authMethods, ssh.Password(s.c.SSHPassword))
+	}
+
+	if _, err := os.Stat(s.c.SSHIdentityFile); err == nil {
+		key, err := ioutil.ReadFile(s.c.SSHIdentityFile)
+		if err != nil {
+			return nil, errors.New("newScript: error reading the private key")
+		}
+
+		var signer ssh.Signer
+		if s.c.SSHIdentityPassphrase != "" {
+			signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(s.c.SSHIdentityPassphrase))
+			if err != nil {
+				return nil, errors.New("newScript: error parsing the encrypted private key")
+			}
+			authMethods = append(authMethods, ssh.PublicKeys(signer))
+		} else {
+			signer, err = ssh.ParsePrivateKey(key)
+			if err != nil {
+				return nil, errors.New("newScript: error parsing the private key")
+			}
+			authMethods = append(authMethods, ssh.PublicKeys(signer))
+		}
+	}
+
+	sshClientConfig := &ssh.ClientConfig{
+		User:            s.c.SSHUser,
+		Auth:            authMethods,
+		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+	}
+	sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", s.c.SSHHostName, s.c.SSHPort), sshClientConfig)
+	s.sshClient = sshClient
+	if err != nil {
+		return nil, fmt.Errorf("newScript: error creating ssh client: %w", err)
+	}
+	_, _, err = s.sshClient.SendRequest("keepalive", false, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	sftpClient, err := sftp.NewClient(sshClient)
+	s.sftpClient = sftpClient
+	if err != nil {
+		return nil, fmt.Errorf("newScript: error creating sftp client: %w", err)
+	}
+}

@@ -213,22 +281,6 @@ func newScript() (*script, error) {
 	return s, nil
 }

-func (s *script) runCommands() (func() error, error) {
-	if s.cli == nil {
-		return noop, nil
-	}
-
-	if err := s.runLabeledCommands("docker-volume-backup.exec-pre"); err != nil {
-		return noop, fmt.Errorf("runCommands: error running pre commands: %w", err)
-	}
-	return func() error {
-		if err := s.runLabeledCommands("docker-volume-backup.exec-post"); err != nil {
-			return fmt.Errorf("runCommands: error running post commands: %w", err)
-		}
-		return nil
-	}, nil
-}
-
 // stopContainers stops all Docker containers that are marked as to being
 // stopped during the backup and returns a function that can be called to
 // restart everything that has been stopped.
@@ -348,12 +400,18 @@ func (s *script) stopContainers() (func() error, error) {
 	}, stopError
 }

-// takeBackup creates a tar archive of the configured backup location and
+// createArchive creates a tar archive of the configured backup location and
 // saves it to disk.
-func (s *script) takeBackup() error {
+func (s *script) createArchive() error {
 	backupSources := s.c.BackupSources

 	if s.c.BackupFromSnapshot {
+		s.logger.Warn(
+			"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
+		)
+		s.logger.Warn(
+			"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the README for an upgrade guide.",
+		)
 		backupSources = filepath.Join("/tmp", s.c.BackupSources)
 		// copy before compressing guard against a situation where backup folder's content are still growing.
 		s.registerHook(hookLevelPlumbing, func(error) error {
@@ -380,7 +438,28 @@ func (s *script) takeBackup() error {
 		s.logger.Infof("Removed tar file `%s`.", tarFile)
 		return nil
 	})
-	if err := createArchive(backupSources, tarFile); err != nil {
+
+	backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
+	if err != nil {
+		return fmt.Errorf("takeBackup: error getting absolute path: %w", err)
+	}
+
+	var filesEligibleForBackup []string
+	if err := filepath.WalkDir(backupPath, func(path string, di fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if s.c.BackupExcludeRegexp.Re != nil && s.c.BackupExcludeRegexp.Re.MatchString(path) {
+			return nil
+		}
+		filesEligibleForBackup = append(filesEligibleForBackup, path)
+		return nil
+	}); err != nil {
+		return fmt.Errorf("compress: error walking filesystem tree: %w", err)
+	}
+
+	if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
 		return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
 	}

@@ -388,10 +467,10 @@ func (s *script) takeBackup() error {
 	return nil
 }

-// encryptBackup encrypts the backup file using PGP and the configured passphrase.
+// encryptArchive encrypts the backup file using PGP and the configured passphrase.
 // In case no passphrase is given it returns early, leaving the backup file
 // untouched.
-func (s *script) encryptBackup() error {
+func (s *script) encryptArchive() error {
 	if s.c.GpgPassphrase == "" {
 		return nil
 	}
@@ -435,9 +514,9 @@ func (s *script) encryptBackup() error {
 	return nil
 }

-// copyBackup makes sure the backup file is copied to both local and remote locations
+// copyArchive makes sure the backup file is copied to both local and remote locations
 // as per the given configuration.
-func (s *script) copyBackup() error {
+func (s *script) copyArchive() error {
 	_, name := path.Split(s.file)
 	if stat, err := os.Stat(s.file); err != nil {
 		return fmt.Errorf("copyBackup: unable to stat backup file: %w", err)
@@ -453,6 +532,7 @@ func (s *script) copyBackup() error {
 	if s.minioClient != nil {
 		if _, err := s.minioClient.FPutObject(context.Background(), s.c.AwsS3BucketName, filepath.Join(s.c.AwsS3Path, name), s.file, minio.PutObjectOptions{
 			ContentType:  "application/tar+gzip",
+			StorageClass: s.c.AwsStorageClass,
 		}); err != nil {
 			return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
 		}
@@ -473,6 +553,52 @@ func (s *script) copyBackup() error {
 		s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath)
 	}

+	if s.sshClient != nil {
+		source, err := os.Open(s.file)
+		if err != nil {
+			return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
+		}
+		defer source.Close()
+
+		destination, err := s.sftpClient.Create(filepath.Join(s.c.SSHRemotePath, name))
+		if err != nil {
+			return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err)
+		}
+		defer destination.Close()
+
+		chunk := make([]byte, 1000000)
+		for {
+			num, err := source.Read(chunk)
+			if err == io.EOF {
+				tot, err := destination.Write(chunk[:num])
+				if err != nil {
+					return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
+				}
+
+				if tot != len(chunk[:num]) {
+					return fmt.Errorf("sshClient: failed to write stream")
+				}
+
+				break
+			}
+
+			if err != nil {
+				return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
+			}
+
+			tot, err := destination.Write(chunk[:num])
+			if err != nil {
+				return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
+			}
+
+			if tot != len(chunk[:num]) {
+				return fmt.Errorf("sshClient: failed to write stream")
+			}
+		}
+
+		s.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", s.file, s.c.SSHHostName, s.c.SSHRemotePath)
+	}
+
 	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
 		if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil {
 			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
@@ -528,7 +654,8 @@ func (s *script) pruneBackups() error {
 	if s.minioClient != nil {
 		candidates := s.minioClient.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{
 			WithMetadata: true,
-			Prefix:       s.c.BackupPruningPrefix,
+			Prefix:       filepath.Join(s.c.AwsS3Path, s.c.BackupPruningPrefix),
+			Recursive:    true,
 		})

 		var matches []minio.ObjectInfo
@@ -605,6 +732,37 @@ func (s *script) pruneBackups() error {
 		})
 	}

+	if s.sshClient != nil {
+		candidates, err := s.sftpClient.ReadDir(s.c.SSHRemotePath)
+		if err != nil {
+			return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err)
+		}
+
+		var matches []string
+		for _, candidate := range candidates {
+			if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) {
+				continue
+			}
+			if candidate.ModTime().Before(deadline) {
+				matches = append(matches, candidate.Name())
+			}
+		}
+
+		s.stats.Storages.SSH = StorageStats{
+			Total:  uint(len(candidates)),
+			Pruned: uint(len(matches)),
+		}
+
+		doPrune(len(matches), len(candidates), "SSH backup(s)", func() error {
+			for _, match := range matches {
+				if err := s.sftpClient.Remove(filepath.Join(s.c.SSHRemotePath, match)); err != nil {
+					return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err)
+				}
+			}
+			return nil
+		})
+	}
+
 	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
 		globPattern := path.Join(
 			s.c.BackupArchive,
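One design note on the chunked SFTP upload added to `copyArchive` above: since `*sftp.File` implements `io.Writer` and `*os.File` implements `io.Reader`, the manual 1 MB read/write loop with its explicit EOF handling could also be expressed as a single `io.Copy` call, which performs the same read-until-EOF and write-error bookkeeping internally. A sketch under those assumptions:

```go
// Equivalent upload step using io.Copy instead of the manual chunk loop;
// io.Copy reads from source until EOF and reports any write error.
if _, err := io.Copy(destination, source); err != nil {
	return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
}
```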
@@ -30,10 +30,11 @@ type StorageStats struct {
 	PruneErrors uint
 }

-// StoragesStats stats about each possible archival location (Local, WebDAV, S3)
+// StoragesStats stats about each possible archival location (Local, WebDAV, SSH, S3)
 type StoragesStats struct {
 	Local  StorageStats
 	WebDAV StorageStats
+	SSH    StorageStats
 	S3     StorageStats
 }

@@ -42,6 +43,7 @@ type Stats struct {
 	StartTime  time.Time
 	EndTime    time.Time
 	TookTime   time.Duration
+	LockedTime time.Duration
 	LogOutput  *bytes.Buffer
 	Containers ContainersStats
 	BackupFile BackupFileStats
@@ -10,27 +10,10 @@ import (
 	"io"
 	"os"
 	"strings"
-
-	"github.com/gofrs/flock"
 )

 var noop = func() error { return nil }

-// lock opens a lockfile at the given location, keeping it locked until the
-// caller invokes the returned release func. When invoked while the file is
-// still locked the function panics.
-func lock(lockfile string) func() error {
-	fileLock := flock.New(lockfile)
-	acquired, err := fileLock.TryLock()
-	if err != nil {
-		panic(err)
-	}
-	if !acquired {
-		panic("unable to acquire file lock")
-	}
-	return fileLock.Unlock
-}
-
 // copy creates a copy of the file located at `dst` at `src`.
 func copyFile(src, dst string) error {
 	in, err := os.Open(src)
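With the package-level `lock` helper removed here, `main.go` above now calls a `s.lock` method that returns `(func() error, error)`. That method is not shown in this excerpt; a hedged sketch of what the new signature, the `encounteredLock` field, and the `LockedTime` stat imply, still assuming `github.com/gofrs/flock` underneath, might be:

```go
// Hypothetical sketch of the instance-level lock the refactor points to:
// retry until the file lock is acquired, note when another run held it,
// and record how long acquisition took.
func (s *script) lock(lockfile string) (func() error, error) {
	start := time.Now()
	fileLock := flock.New(lockfile)
	for {
		acquired, err := fileLock.TryLock()
		if err != nil {
			return noop, err
		}
		if acquired {
			s.stats.LockedTime = time.Since(start)
			return fileLock.Unlock, nil
		}
		if !s.encounteredLock {
			s.logger.Info("Waiting for the lock file to become available.")
			s.encounteredLock = true
		}
		time.Sleep(time.Second)
	}
}
```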
@@ -13,6 +13,7 @@ Here is a list of all data passed to the template:
 * `StartTime`: time when the script started execution
 * `EndTime`: time when the backup has completed successfully (after pruning)
 * `TookTime`: amount of time it took for the backup to run. (equal to `EndTime - StartTime`)
+* `LockedTime`: amount of time it took for the backup to acquire the exclusive lock
 * `LogOutput`: full log of the application
 * `Containers`: object containing stats about the docker containers
   * `All`: total number of containers
@@ -24,15 +25,16 @@ Here is a list of all data passed to the template:
   * `FullPath`: full path of the backup file (e.g. `/archive/backup-2022-02-11T01-00-00.tar.gz`)
   * `Size`: size in bytes of the backup file
 * `Storages`: object that holds stats about each storage
-  * `Local`, `S3` or `WebDAV`:
+  * `Local`, `S3`, `WebDAV` or `SSH`:
     * `Total`: total number of backup files
     * `Pruned`: number of backup files that were deleted due to pruning rule
     * `PruneErrors`: number of backup files that were unable to be pruned

 ## Functions

-Some formatting functions are also available:
+Some formatting and helper functions are also available:

 * `formatTime`: formats a time object using [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339) format (e.g. `2022-02-11T01:00:00Z`)
 * `formatBytesBin`: formats an amount of bytes using powers of 1024 (e.g. `7055258` bytes will be `6.7 MiB`)
 * `formatBytesDec`: formats an amount of bytes using powers of 1000 (e.g. `7055258` bytes will be `7.1 MB`)
+* `env`: returns the value of the environment variable of the given key if set
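Taken together with the data listed above, a minimal custom template using the new `env` helper could look like the following sketch; it assumes the stats fields are exposed to the template as documented here, and `HOSTNAME` is only an example variable, not something the image defines:

```
{{ define "title_success" -}}
Backup on {{ env "HOSTNAME" }} finished at {{ formatTime .EndTime }}
{{- end }}
```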
@@ -22,4 +22,4 @@ else
 fi

 echo "Starting cron in foreground."
-crond -f -l 8
+crond -f -d 8
go.mod (36 lines changed)
@@ -1,6 +1,6 @@
 module github.com/offen/docker-volume-backup

-go 1.17
+go 1.18

 require (
 	github.com/containrrr/shoutrrr v0.5.2
@@ -11,14 +11,16 @@ require (
 	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
 	github.com/minio/minio-go/v7 v7.0.16
 	github.com/otiai10/copy v1.7.0
+	github.com/pkg/sftp v1.13.5
 	github.com/sirupsen/logrus v1.8.1
-	github.com/studio-b12/gowebdav v0.0.0-20211109083228-3f8721cd4b6f
-	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
+	github.com/studio-b12/gowebdav v0.0.0-20220128162035-c7b1ff8a5e62
+	golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
+	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
 )

 require (
-	github.com/Microsoft/go-winio v0.4.17 // indirect
-	github.com/containerd/containerd v1.5.5 // indirect
+	github.com/Microsoft/go-winio v0.5.2 // indirect
+	github.com/containerd/containerd v1.6.6 // indirect
 	github.com/docker/distribution v2.7.1+incompatible // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
@@ -26,33 +28,39 @@ require (
 	github.com/fatih/color v1.10.0 // indirect
 	github.com/fsnotify/fsnotify v1.4.9 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.0 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/uuid v1.3.0 // indirect
+	github.com/gorilla/mux v1.7.3 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.13.6 // indirect
+	github.com/klauspost/compress v1.15.6 // indirect
 	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+	github.com/kr/fs v0.1.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
 	github.com/mattn/go-colorable v0.1.8 // indirect
 	github.com/mattn/go-isatty v0.0.12 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
 	github.com/minio/sha256-simd v1.0.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
+	github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
 	github.com/nxadm/tail v1.4.6 // indirect
 	github.com/onsi/ginkgo v1.14.2 // indirect
 	github.com/onsi/gomega v1.10.3 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.1 // indirect
+	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/rs/xid v1.3.0 // indirect
-	golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect
-	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
-	golang.org/x/text v0.3.6 // indirect
+	golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect
+	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
+	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
-	google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect
-	google.golang.org/grpc v1.33.2 // indirect
-	google.golang.org/protobuf v1.26.0 // indirect
+	google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 // indirect
+	google.golang.org/grpc v1.47.0 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
 	gopkg.in/ini.v1 v1.65.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
@@ -3,6 +3,8 @@
 set -e

 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))

 docker network create test_network
 docker volume create backup_data
@@ -50,17 +52,11 @@ docker run --rm -it \
   -v backup_data:/data alpine \
   ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'

-echo "[TEST:PASS] Found relevant files in untared remote backup."
+pass "Found relevant files in untared remote backup."

 # This test does not stop containers during backup. This is happening on
 # purpose in order to cover this setup as well.
-if [ "$(docker ps -q | wc -l)" != "2" ]; then
-  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
-  docker ps
-  exit 1
-fi
-
-echo "[TEST:PASS] All containers running post backup."
+expect_running_containers "2"

 docker rm $(docker stop minio offen)
 docker volume rm backup_data app_data
@@ -10,8 +10,9 @@ services:
       MARIADB_ROOT_PASSWORD: test
       MARIADB_DATABASE: backup
     labels:
+      # this is testing the deprecated label on purpose
       - docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump -ptest --all-databases > /tmp/volume/dump.sql'
-      - docker-volume-backup.exec-post=/bin/sh -c 'echo "post" > /tmp/volume/post.txt'
+      - docker-volume-backup.copy-post=/bin/sh -c 'echo "post" > /tmp/volume/post.txt'
       - docker-volume-backup.exec-label=test
     volumes:
       - app_data:/tmp/volume
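The `copy-post` label introduced above follows the `docker-volume-backup.<phase>-<pre|post>` naming that the lifecycle refactor in `main.go` establishes, and the deprecation warning added to `createArchive` points at `archive-pre`/`archive-post` for preparing backup sources. A hedged example of migrating the dump label away from the deprecated `exec-pre` form might look like this:

```yml
labels:
  # hypothetical migration example; phase-scoped labels replace exec-pre/exec-post
  - docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump -ptest --all-databases > /tmp/volume/dump.sql'
  - docker-volume-backup.archive-post=/bin/sh -c 'rm -f /tmp/volume/dump.sql'
```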
@@ -3,7 +3,8 @@
 set -e

 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))

 docker-compose up -d
 sleep 30 # mariadb likes to take a bit before responding
@@ -13,29 +14,27 @@ sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' commands_archive

 tar -xvf ./local/test.tar.gz
 if [ ! -f ./backup/data/dump.sql ]; then
-  echo "[TEST:FAIL] Could not find file written by pre command."
-  exit 1
+  fail "Could not find file written by pre command."
 fi
-echo "[TEST:PASS] Found expected file."
+pass "Found expected file."

 if [ -f ./backup/data/post.txt ]; then
-  echo "[TEST:FAIL] File created in post command was present in backup."
-  exit 1
+  fail "File created in post command was present in backup."
 fi
-echo "[TEST:PASS] Did not find unexpected file."
+pass "Did not find unexpected file."

 docker-compose down --volumes
 sudo rm -rf ./local

-echo "[TEST:INFO] Running commands test in swarm mode next."
+info "Running commands test in swarm mode next."

 docker swarm init

 docker stack deploy --compose-file=docker-compose.yml test_stack

 while [ -z $(docker ps -q -f name=backup) ]; do
-  echo "[TEST:INFO] Backup container not ready yet. Retrying."
+  info "Backup container not ready yet. Retrying."
   sleep 1
 done

@@ -47,16 +46,14 @@ sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' test_stack_archi

 tar -xvf ./local/test.tar.gz
 if [ ! -f ./backup/data/dump.sql ]; then
-  echo "[TEST:FAIL] Could not find file written by pre command."
-  exit 1
+  fail "Could not find file written by pre command."
 fi
-echo "[TEST:PASS] Found expected file."
+pass "Found expected file."

 if [ -f ./backup/data/post.txt ]; then
-  echo "[TEST:FAIL] File created in post command was present in backup."
-  exit 1
+  fail "File created in post command was present in backup."
 fi
-echo "[TEST:PASS] Did not find unexpected file."
+pass "Did not find unexpected file."

 docker stack rm test_stack
 docker swarm leave --force
@@ -1,68 +0,0 @@
-#!/bin/sh
-
-set -e
-
-cd $(dirname $0)
-
-mkdir -p local
-
-docker-compose up -d
-sleep 5
-
-# A symlink for a known file in the volume is created so the test can check
-# whether symlinks are preserved on backup.
-docker-compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
-docker-compose exec backup backup
-
-sleep 5
-if [ "$(docker-compose ps -q | wc -l)" != "4" ]; then
-  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
-  docker-compose ps
-  exit 1
-fi
-echo "[TEST:PASS] All containers running post backup."
-
-docker run --rm -it \
-  -v compose_minio_backup_data:/minio_data \
-  -v compose_webdav_backup_data:/webdav_data alpine \
-  ash -c 'apk add gnupg && \
-    echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /minio_data/backup/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db && \
-    echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /webdav_data/data/my/new/path/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
-
-echo "[TEST:PASS] Found relevant files in decrypted and untared remote backups."
-
-echo 1234secret | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test-hostnametoken.tar.gz.gpg > ./local/decrypted.tar.gz
-tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
-rm ./local/decrypted.tar.gz
-test -L /tmp/backup/app_data/db.link
-
-echo "[TEST:PASS] Found relevant files in decrypted and untared local backup."
-
-test -L ./local/test-hostnametoken.latest.tar.gz.gpg
-echo "[TEST:PASS] Found symlink to latest version in local backup."
-
-# The second part of this test checks if backups get deleted when the retention
-# is set to 0 days (which it should not as it would mean all backups get deleted)
-# TODO: find out if we can test actual deletion without having to wait for a day
-BACKUP_RETENTION_DAYS="0" docker-compose up -d
-sleep 5
-
-docker-compose exec backup backup
-
-docker run --rm -it \
-  -v compose_minio_backup_data:/minio_data \
-  -v compose_webdav_backup_data:/webdav_data alpine \
-  ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ] && \
-    [ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'
-
-echo "[TEST:PASS] Remote backups have not been deleted."
-
-if [ "$(find ./local -type f | wc -l)" != "1" ]; then
-  echo "[TEST:FAIL] Backups should not have been deleted, instead seen:"
-  find ./local -type f
-  exit 1
-fi
-echo "[TEST:PASS] Local backups have not been deleted."
-
-docker-compose down --volumes
test/confd/02backup.env (new file, 2 lines)
@@ -0,0 +1,2 @@
+BACKUP_FILENAME="other.tar.gz"
+BACKUP_CRON_EXPRESSION="*/1 * * * *"
@@ -7,8 +7,9 @@ services:
     volumes:
       - ./local:/archive
       - app_data:/backup/app_data:ro
-      - ./backup.env:/etc/dockervolumebackup/conf.d/00backup.env
-      - ./never.env:/etc/dockervolumebackup/conf.d/10never.env
+      - ./01backup.env:/etc/dockervolumebackup/conf.d/01backup.env
+      - ./02backup.env:/etc/dockervolumebackup/conf.d/02backup.env
+      - ./03never.env:/etc/dockervolumebackup/conf.d/03never.env
      - /var/run/docker.sock:/var/run/docker.sock

   offen:
@@ -3,6 +3,8 @@
 set -e

 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))

 mkdir -p local

@@ -14,13 +16,16 @@ sleep 100
 docker-compose down --volumes

 if [ ! -f ./local/conf.tar.gz ]; then
-  echo "[TEST:FAIL] Config from file was not used."
-  exit 1
+  fail "Config from file was not used."
 fi
-echo "[TEST:PASS] Config from file was used."
+pass "Config from file was used."

+if [ ! -f ./local/other.tar.gz ]; then
+  fail "Run on same schedule did not succeed."
+fi
+pass "Run on same schedule succeeded."

 if [ -f ./local/never.tar.gz ]; then
-  echo "[TEST:FAIL] Unexpected file was found."
-  exit 1
+  fail "Unexpected file was found."
 fi
-echo "[TEST:PASS] Unexpected cron did not run."
+pass "Unexpected cron did not run."
test/gpg/docker-compose.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
+version: '3'
+
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    restart: always
+    environment:
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
+      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      GPG_PASSPHRASE: 1234secret
+    volumes:
+      - ./local:/archive
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/gpg/run.sh (new executable file, 34 lines)
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+mkdir -p local
+
+docker-compose up -d
+sleep 5
+
+docker-compose exec backup backup
+
+expect_running_containers "2"
+
+tmp_dir=$(mktemp -d)
+
+echo 1234secret | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
+tar -xf ./local/decrypted.tar.gz -C $tmp_dir
+ls -lah $tmp_dir
+if [ ! -f $tmp_dir/backup/app_data/offen.db ]; then
+  fail "Could not find expected file in untared archive."
+fi
+rm ./local/decrypted.tar.gz
+
+pass "Found relevant files in decrypted and untared local backup."
+
+if [ ! -L ./local/test-latest.tar.gz.gpg ]; then
+  fail "Could not find local symlink to latest encrypted backup."
+fi
+
+docker-compose down --volumes
test/ignore/.gitignore (new file, 1 line, vendored)
@@ -0,0 +1 @@
+local
test/ignore/docker-compose.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
+version: '3.8'
+
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    deploy:
+      restart_policy:
+        condition: on-failure
+    environment:
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_EXCLUDE_REGEXP: '\.(me|you)$$'
+    volumes:
+      - ./local:/archive
+      - ./sources:/backup/data:ro
test/ignore/run.sh (new file, 28 lines)
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+set -e
+
+cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))
+
+mkdir -p local
+
+docker-compose up -d
+sleep 5
+docker-compose exec backup backup
+
+docker-compose down --volumes
+
+out=$(mktemp -d)
+sudo tar --same-owner -xvf ./local/test.tar.gz -C "$out"
+
+if [ ! -f "$out/backup/data/me.txt" ]; then
+  fail "Expected file was not found."
+fi
+pass "Expected file was found."
+
+if [ -f "$out/backup/data/skip.me" ]; then
+  fail "Ignored file was found."
+fi
+pass "Ignored file was not found."
test/ignore/sources/me.txt (new empty file)
test/ignore/sources/skip.me (new empty file)
test/local/.gitignore (new file, 1 line, vendored)
@@ -0,0 +1 @@
+local
test/local/docker-compose.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+version: '3'
+
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    hostname: hostnametoken
+    restart: always
+    environment:
+      BACKUP_FILENAME_EXPAND: 'true'
+      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
+      BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      BACKUP_PRUNING_LEEWAY: 5s
+      BACKUP_PRUNING_PREFIX: test
+    volumes:
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+      - ./local:/archive
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/local/run.sh (new executable file, 55 lines)
@@ -0,0 +1,55 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+mkdir -p local
+
+docker-compose up -d
+sleep 5
+
+# A symlink for a known file in the volume is created so the test can check
+# whether symlinks are preserved on backup.
+docker-compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
+docker-compose exec backup backup
+
+sleep 5
+
+expect_running_containers "2"
+
+tmp_dir=$(mktemp -d)
+tar -xvf ./local/test-hostnametoken.tar.gz -C $tmp_dir
+if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
+  fail "Could not find expected file in untared archive."
+fi
+rm -f ./local/test-hostnametoken.tar.gz
+
+if [ ! -L "$tmp_dir/backup/app_data/db.link" ]; then
+  fail "Could not find expected symlink in untared archive."
+fi
+
+pass "Found relevant files in decrypted and untared local backup."
+
+if [ ! -L ./local/test-hostnametoken.latest.tar.gz.gpg ]; then
+  fail "Could not find symlink to latest version."
+fi
+
+pass "Found symlink to latest version in local backup."
+
+# The second part of this test checks if backups get deleted when the retention
+# is set to 0 days (which it should not as it would mean all backups get deleted)
+# TODO: find out if we can test actual deletion without having to wait for a day
+BACKUP_RETENTION_DAYS="0" docker-compose up -d
+sleep 5
+
+docker-compose exec backup backup
+
+if [ "$(find ./local -type f | wc -l)" != "1" ]; then
+  fail "Backups should not have been deleted, instead seen: "$(find ./local -type f)""
+fi
+pass "Local backups have not been deleted."
+
+docker-compose down --volumes
@@ -10,6 +10,7 @@ services:
       BACKUP_PRUNING_PREFIX: test
       NOTIFICATION_LEVEL: info
      NOTIFICATION_URLS: ${NOTIFICATION_URLS}
+      EXTRA_VALUE: extra-value
     volumes:
       - ./local:/archive
       - app_data:/backup/app_data:ro
@@ -1,5 +1,5 @@
 {{ define "title_success" -}}
-Successful test run, yay!
+Successful test run with {{ env "EXTRA_VALUE" }}, yay!
 {{- end }}

 {{ define "body_success" -}}
@@ -3,6 +3,8 @@
 set -e

 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))

 mkdir -p local

@@ -10,16 +12,15 @@ docker-compose up -d
 sleep 5

 GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')
-echo "[TEST:INFO] Set up Gotify application using token $GOTIFY_TOKEN"
+info "Set up Gotify application using token $GOTIFY_TOKEN"

 docker-compose exec backup backup

 NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
 if [ "$NUM_MESSAGES" != 0 ]; then
-  echo "[TEST:FAIL] Expected no notifications to be sent when not configured"
-  exit 1
+  fail "Expected no notifications to be sent when not configured"
 fi
-echo "[TEST:PASS] No notifications were sent when not configured."
+pass "No notifications were sent when not configured."

 docker-compose down

@@ -29,24 +30,21 @@ docker-compose exec backup backup

 NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
 if [ "$NUM_MESSAGES" != 1 ]; then
-  echo "[TEST:FAIL] Expected one notifications to be sent when configured"
-  exit 1
+  fail "Expected one notifications to be sent when configured"
 fi
-echo "[TEST:PASS] Correct number of notifications were sent when configured."
+pass "Correct number of notifications were sent when configured."

 MESSAGE_TITLE=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].title')
 MESSAGE_BODY=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].message')

-if [ "$MESSAGE_TITLE" != "Successful test run, yay!" ]; then
-  echo "[TEST:FAIL] Unexpected notification title $MESSAGE_TITLE"
-  exit 1
+if [ "$MESSAGE_TITLE" != "Successful test run with extra-value, yay!" ]; then
+  fail "Unexpected notification title $MESSAGE_TITLE"
 fi
-echo "[TEST:PASS] Custom notification title was used."
+pass "Custom notification title was used."

 if [ "$MESSAGE_BODY" != "Backing up /tmp/test.tar.gz succeeded." ]; then
-  echo "[TEST:FAIL] Unexpected notification body $MESSAGE_BODY"
-  exit 1
+  fail "Unexpected notification body $MESSAGE_BODY"
 fi
-echo "[TEST:PASS] Custom notification body was used."
+pass "Custom notification body was used."

 docker-compose down --volumes
@@ -4,6 +4,8 @@
 set -e

 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))

 mkdir -p local

@@ -12,17 +14,17 @@ sleep 5

 docker-compose exec backup backup

-sudo tar --same-owner -xvf ./local/backup.tar.gz -C /tmp
+tmp_dir=$(mktemp -d)
+sudo tar --same-owner -xvf ./local/backup.tar.gz -C $tmp_dir

-sudo find /tmp/backup/postgres > /dev/null
-echo "[TEST:PASS] Backup contains files at expected location"
+sudo find $tmp_dir/backup/postgres > /dev/null
+pass "Backup contains files at expected location"

-for file in $(sudo find /tmp/backup/postgres); do
+for file in $(sudo find $tmp_dir/backup/postgres); do
   if [ "$(sudo stat -c '%u:%g' $file)" != "70:70" ]; then
-    echo "[TEST:FAIL] Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
-    exit 1
+    fail "Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
   fi
 done
-echo "[TEST:PASS] All files and directories in backup preserved their ownership."
+pass "All files and directories in backup preserved their ownership."

 docker-compose down --volumes
@@ -12,21 +12,11 @@ services:
     volumes:
       - minio_backup_data:/data

-  webdav:
-    image: bytemark/webdav:2.4
-    environment:
-      AUTH_TYPE: Digest
-      USERNAME: test
-      PASSWORD: test
-    volumes:
-      - webdav_backup_data:/var/lib/dav
-
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
     hostname: hostnametoken
     depends_on:
       - minio
-      - webdav
     restart: always
     environment:
       AWS_ACCESS_KEY_ID: test
@@ -36,18 +26,11 @@ services:
       AWS_S3_BUCKET_NAME: backup
       BACKUP_FILENAME_EXPAND: 'true'
       BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
-      BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
       BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
-      GPG_PASSPHRASE: 1234secret
-      WEBDAV_URL: http://webdav/
-      WEBDAV_PATH: /my/new/path/
-      WEBDAV_USERNAME: test
-      WEBDAV_PASSWORD: test
     volumes:
-      - ./local:/archive
       - app_data:/backup/app_data:ro
       - /var/run/docker.sock:/var/run/docker.sock

@@ -60,5 +43,5 @@ services:

 volumes:
   minio_backup_data:
-  webdav_backup_data:
+    name: minio_backup_data
   app_data:
test/s3/run.sh (new executable file, 42 lines)
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+docker-compose up -d
+sleep 5
+
+# A symlink for a known file in the volume is created so the test can check
+# whether symlinks are preserved on backup.
+docker-compose exec backup backup
+
+sleep 5
+
+expect_running_containers "3"
+
+docker run --rm -it \
+  -v minio_backup_data:/minio_data \
+  alpine \
+  ash -c 'tar -xvf /minio_data/backup/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
+
+pass "Found relevant files in untared remote backups."
+
+# The second part of this test checks if backups get deleted when the retention
+# is set to 0 days (which it should not as it would mean all backups get deleted)
+# TODO: find out if we can test actual deletion without having to wait for a day
+BACKUP_RETENTION_DAYS="0" docker-compose up -d
+sleep 5
+
+docker-compose exec backup backup
+
+docker run --rm -it \
+  -v minio_backup_data:/minio_data \
+  alpine \
+  ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ]'
+
+pass "Remote backups have not been deleted."
+
+docker-compose down --volumes
test/ssh/docker-compose.yml (new file, 47 lines)
@@ -0,0 +1,47 @@
+version: '3'
+
+services:
+  ssh:
+    image: linuxserver/openssh-server:version-8.6_p1-r3
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - USER_NAME=test
+    volumes:
+      - ./id_rsa.pub:/config/.ssh/authorized_keys
+      - ssh_backup_data:/tmp
+
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    hostname: hostnametoken
+    depends_on:
+      - ssh
+    restart: always
+    environment:
+      BACKUP_FILENAME_EXPAND: 'true'
+      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      BACKUP_PRUNING_LEEWAY: 5s
+      BACKUP_PRUNING_PREFIX: test
+      SSH_HOST_NAME: ssh
+      SSH_PORT: 2222
+      SSH_USER: test
+      SSH_REMOTE_PATH: /tmp
+      SSH_IDENTITY_PASSPHRASE: test1234
+    volumes:
+      - ./id_rsa:/root/.ssh/id_rsa
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  ssh_backup_data:
+    name: ssh_backup_data
+  app_data:
43
test/ssh/run.sh
Executable file
43
test/ssh/run.sh
Executable file
@@ -0,0 +1,43 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"

docker-compose up -d
sleep 5

docker-compose exec backup backup

sleep 5

expect_running_containers 3

docker run --rm -it \
  -v ssh_backup_data:/ssh_data \
  alpine \
  ash -c 'tar -xvf /ssh_data/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'

pass "Found relevant files in untared remote backup."

# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5

docker-compose exec backup backup

docker run --rm -it \
  -v ssh_backup_data:/ssh_data \
  alpine \
  ash -c '[ $(find /ssh_data/ -type f | wc -l) = "1" ]'

pass "Remote backups have not been deleted."

docker-compose down --volumes
rm -f id_rsa id_rsa.pub
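Since the identity is passphrase-protected, a wrong `SSH_IDENTITY_PASSPHRASE` would only surface as an authentication failure inside the backup container. The generated key material can be sanity-checked up front on the host (a sketch using the files created by the script above):

    # derive the public key from the private key; this succeeds only if the
    # passphrase matches, so it validates both halves of the identity
    ssh-keygen -y -P "test1234" -f id_rsa > /dev/null && echo "identity ok"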
test/swarm/docker-compose.yml
@@ -43,6 +43,8 @@ services:
     image: offen/offen:latest
     labels:
       - docker-volume-backup.stop-during-backup=true
+    healthcheck:
+      disable: true
     deploy:
       replicas: 2
       restart_policy:
@@ -62,4 +64,5 @@ services:
 
 volumes:
   backup_data:
+    name: backup_data
   pg_data:
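The explicit `name: backup_data` is what lets the run script below drop the stack prefix: without it, `docker stack deploy` names the volume `test_stack_backup_data`, which is exactly the mount the old script had to reference. With a fixed name, the verification container mounts it directly, e.g.:

    docker run --rm -v backup_data:/data alpine ls /data/backup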
test/swarm/run.sh
@@ -3,13 +3,15 @@
 set -e
 
 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))
 
 docker swarm init
 
 docker stack deploy --compose-file=docker-compose.yml test_stack
 
 while [ -z $(docker ps -q -f name=backup) ]; do
-  echo "[TEST:INFO] Backup container not ready yet. Retrying."
+  info "Backup container not ready yet. Retrying."
   sleep 1
 done
@@ -18,18 +20,13 @@ sleep 20
 docker exec $(docker ps -q -f name=backup) backup
 
 docker run --rm -it \
-  -v test_stack_backup_data:/data alpine \
+  -v backup_data:/data alpine \
   ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
 
-echo "[TEST:PASS] Found relevant files in untared backup."
+pass "Found relevant files in untared backup."
 
 sleep 5
-if [ "$(docker ps -q | wc -l)" != "5" ]; then
-  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
-  docker ps -a
-  exit 1
-fi
-echo "[TEST:PASS] All containers running post backup."
+expect_running_containers "5"
 
 docker stack rm test_stack
 docker swarm leave --force
test/util.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
#!/bin/sh

set -e

info () {
  echo "[test:${current_test:-none}:info] $1"
}

pass () {
  echo "[test:${current_test:-none}:pass] $1"
}

fail () {
  echo "[test:${current_test:-none}:fail] $1"
  exit 1
}

expect_running_containers () {
  if [ "$(docker ps -q | wc -l)" != "$1" ]; then
    fail "Expected $1 containers to be running, instead seen: $(docker ps -a | wc -l)"
  fi
  pass "$1 containers running."
}
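Taken together, these helpers give every suite the same shape: source the file, set `current_test`, and call `info`/`pass`/`fail`/`expect_running_containers` instead of hand-rolling echo-and-exit blocks. A hypothetical minimal test wired up the same way as the real ones:

    #!/bin/sh
    set -e

    cd "$(dirname "$0")"
    . ../util.sh
    current_test=$(basename $(pwd))

    docker-compose up -d && sleep 5
    docker-compose exec backup backup
    expect_running_containers "2"
    pass "Smoke test finished."
    docker-compose down --volumes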
test/webdav/docker-compose.yml (new file, 45 lines)
@@ -0,0 +1,45 @@
version: '3'

services:
  webdav:
    image: bytemark/webdav:2.4
    environment:
      AUTH_TYPE: Digest
      USERNAME: test
      PASSWORD: test
    volumes:
      - webdav_backup_data:/var/lib/dav

  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    hostname: hostnametoken
    depends_on:
      - webdav
    restart: always
    environment:
      BACKUP_FILENAME_EXPAND: 'true'
      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      WEBDAV_URL: http://webdav/
      WEBDAV_URL_INSECURE: 'true'
      WEBDAV_PATH: /my/new/path/
      WEBDAV_USERNAME: test
      WEBDAV_PASSWORD: test
    volumes:
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  webdav_backup_data:
    name: webdav_backup_data
  app_data:
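Because the server is configured for Digest auth, poking it by hand needs the matching curl flags. A sketch for listing the upload target manually (assumes adding a `ports: ["8080:80"]` mapping to the webdav service, which the test itself does not publish):

    curl --digest -u test:test -X PROPFIND http://localhost:8080/my/new/path/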
test/webdav/run.sh (new executable file, 40 lines)
@@ -0,0 +1,40 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

docker-compose up -d
sleep 5

docker-compose exec backup backup

sleep 5

expect_running_containers "3"

docker run --rm -it \
  -v webdav_backup_data:/webdav_data \
  alpine \
  ash -c 'tar -xvf /webdav_data/data/my/new/path/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'

pass "Found relevant files in untared remote backup."

# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5

docker-compose exec backup backup

docker run --rm -it \
  -v webdav_backup_data:/webdav_data \
  alpine \
  ash -c '[ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'

pass "Remote backups have not been deleted."

docker-compose down --volumes