Compare commits


2 Commits

Author         SHA1        Message                                                    Date
Frederik Ring  8caac8724c  Add documentation for header format option                 2022-04-19 21:21:23 +02:00
Frederik Ring  9eda23e512  Make header format for created tar archive configurable    2022-04-19 21:11:29 +02:00
73 changed files with 1402 additions and 3576 deletions

.circleci/config.yml  Normal file  (72 changes)

@@ -0,0 +1,72 @@
version: 2.1
jobs:
canary:
machine:
image: ubuntu-2004:202201-02
working_directory: ~/docker-volume-backup
steps:
- checkout
- run:
name: Build
command: |
docker build . -t offen/docker-volume-backup:canary
- run:
name: Install gnupg
command: |
sudo apt-get install -y gnupg
- run:
name: Run tests
working_directory: ~/docker-volume-backup/test
command: |
export GPG_TTY=$(tty)
./test.sh canary
build:
docker:
- image: cimg/base:2020.06
environment:
DOCKER_BUILDKIT: '1'
DOCKER_CLI_EXPERIMENTAL: enabled
working_directory: ~/docker-volume-backup
steps:
- checkout
- setup_remote_docker:
version: 20.10.6
- docker/install-docker-credential-helper
- docker/configure-docker-credentials-store
- run:
name: Push to Docker Hub
command: |
echo "$DOCKER_ACCESSTOKEN" | docker login --username offen --password-stdin
# This is required for building ARM: https://gitlab.alpinelinux.org/alpine/aports/-/issues/12406
docker run --rm --privileged linuxkit/binfmt:v0.8
docker context create docker-volume-backup
docker buildx create docker-volume-backup --name docker-volume-backup --use
docker buildx inspect --bootstrap
tag_args="-t offen/docker-volume-backup:$CIRCLE_TAG"
if [[ "$CIRCLE_TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
# prerelease tags like `v2.0.0-alpha.1` should not be released as `latest`
tag_args="$tag_args -t offen/docker-volume-backup:latest"
tag_args="$tag_args -t offen/docker-volume-backup:$(echo "$CIRCLE_TAG" | cut -d. -f1)"
fi
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
$tag_args . --push
workflows:
version: 2
docker_image:
jobs:
- canary:
filters:
tags:
ignore: /^v.*/
- build:
filters:
branches:
ignore: /.*/
tags:
only: /^v.*/
orbs:
docker: circleci/docker@1.0.1

View File

@@ -1,7 +1 @@
test
.github
.circleci
docs
.editorconfig
LICENSE
README.md

.github/FUNDING.yml  vendored  (3 changes)

@@ -1,3 +0,0 @@
github: offen
patreon: offen

.github/ISSUE_TEMPLATE.md  vendored  Normal file  (20 changes)

@@ -0,0 +1,20 @@
* **I'm submitting a ...**
- [ ] bug report
- [ ] feature request
- [ ] support request
* **What is the current behavior?**
* **If the current behavior is a bug, please provide the configuration and steps to reproduce and if possible a minimal demo of the problem.**
* **What is the expected behavior?**
* **What is the motivation / use case for changing the behavior?**
* **Please tell us about your environment:**
- Image version:
- Docker version:
- docker-compose version:
* **Other information** (e.g. detailed explanation, stacktraces, related issues, suggestions on how to fix, links for us to have context, e.g. Stack Overflow, etc.)

View File

@@ -1,34 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
<!--
A clear and concise description of what the bug is.
-->
**To Reproduce**
Steps to reproduce the behavior:
1. ...
2. ...
3. ...
**Expected behavior**
<!--
A clear and concise description of what you expected to happen.
-->
**Version (please complete the following information):**
- Image Version: <!-- e.g. v2.21.0 -->
- Docker Version: <!-- e.g. 20.10.17 -->
- Docker Compose Version (if applicable): <!-- e.g. 1.29.2 -->
**Additional context**
<!--
Add any other context about the problem here.
-->

View File

@@ -1,28 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
<!--
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-->
**Describe the solution you'd like**
<!--
A clear and concise description of what you want to happen.
-->
**Describe alternatives you've considered**
<!--
A clear and concise description of any alternative solutions or features you've considered.
-->
**Additional context**
<!--
Add any other context or screenshots about the feature request here.
-->

View File

@@ -1,28 +0,0 @@
---
name: Support request
about: Ask for help
title: ''
labels: ''
assignees: ''
---
**What are you trying to do?**
<!--
A clear and concise description of what you are trying to do, but cannot get working.
-->
**What is your current configuration?**
<!--
Add the full configuration you are using. Please redact out any real-world credentials.
-->
**Log output**
<!--
Provide the full log output of your setup.
-->
**Additional context**
<!--
Add any other context or screenshots about the support request here.
-->

View File

@@ -1,59 +0,0 @@
name: Release Docker Image
on:
push:
tags: v**
jobs:
push_to_registries:
name: Push Docker image to multiple registries
runs-on: ubuntu-latest
permissions:
packages: write
contents: read
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Log in to GHCR
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract Docker tags
id: meta
run: |
version_tag="${{github.ref_name}}"
tags=($version_tag)
if [[ "$version_tag" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
# prerelease tags like `v2.0.0-alpha.1` should not be released as `latest` nor `v2`
tags+=("latest")
tags+=($(echo "$version_tag" | cut -d. -f1))
fi
releases=""
for tag in "${tags[@]}"; do
releases="${releases:+$releases,}offen/docker-volume-backup:$tag,ghcr.io/offen/docker-volume-backup:$tag"
done
echo "releases=$releases" >> "$GITHUB_OUTPUT"
- name: Build and push Docker images
uses: docker/build-push-action@v4
with:
context: .
push: true
platforms: linux/amd64,linux/arm64,linux/arm/v7
tags: ${{ steps.meta.outputs.releases }}

View File

@@ -1,30 +0,0 @@
name: Run Integration Tests
on:
push:
branches:
- main
pull_request:
jobs:
test:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build Docker Image
env:
DOCKER_BUILDKIT: '1'
run: docker build . -t offen/docker-volume-backup:test
- name: Run Tests
working-directory: ./test
run: |
# Stop the buildx container so the tests can make assertions
# about the number of running containers
docker rm -f $(docker ps -aq)
export GPG_TTY=$(tty)
./test.sh test

View File

@@ -1,21 +1,24 @@
# Copyright 2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: MPL-2.0
FROM golang:1.20-alpine as builder
FROM golang:1.18-alpine as builder
WORKDIR /app
COPY . .
COPY go.mod go.sum ./
RUN go mod download
COPY cmd/backup ./cmd/backup/
WORKDIR /app/cmd/backup
RUN go build -o backup .
FROM alpine:3.17
FROM alpine:3.15
WORKDIR /root
RUN apk add --no-cache ca-certificates
COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
COPY --chmod=755 ./entrypoint.sh /root/
COPY ./entrypoint.sh /root/
RUN chmod +x entrypoint.sh
ENTRYPOINT ["/root/entrypoint.sh"]

README.md  (401 changes)

@@ -4,24 +4,23 @@
# docker-volume-backup
Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage or SSH compatible storage.
Backup Docker volumes locally or to any S3 compatible storage.
The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) sidecar container to an existing Docker setup.
It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV, Azure Blob Storage or SSH compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3 or WebDAV compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
<!-- MarkdownTOC -->
- [Quickstart](#quickstart)
- [Recurring backups in a compose setup](#recurring-backups-in-a-compose-setup)
- [One-off backups using Docker CLI](#one-off-backups-using-docker-cli)
- [Available image registries](#available-image-registries)
- [Configuration reference](#configuration-reference)
- [How to](#how-to)
- [Stop containers during backup](#stop-containers-during-backup)
- [Automatically pruning old backups](#automatically-pruning-old-backups)
- [Send email notifications on failed backup runs](#send-email-notifications-on-failed-backup-runs)
- [Customize notifications](#customize-notifications)
- [Run custom commands during the backup lifecycle](#run-custom-commands-during-the-backup-lifecycle)
- [Run custom commands before / after backup](#run-custom-commands-before--after-backup)
- [Encrypting your backup using GPG](#encrypting-your-backup-using-gpg)
- [Restoring a volume from a backup](#restoring-a-volume-from-a-backup)
- [Set the timezone the container runs in](#set-the-timezone-the-container-runs-in)
@@ -29,21 +28,13 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
- [Manually triggering a backup](#manually-triggering-a-backup)
- [Update deprecated email configuration](#update-deprecated-email-configuration)
- [Replace deprecated `BACKUP_FROM_SNAPSHOT` usage](#replace-deprecated-backup_from_snapshot-usage)
- [Replace deprecated `exec-pre` and `exec-post` labels](#replace-deprecated-exec-pre-and-exec-post-labels)
- [Using a custom Docker host](#using-a-custom-docker-host)
- [Use with rootless Docker](#use-with-rootless-docker)
- [Run multiple backup schedules in the same container](#run-multiple-backup-schedules-in-the-same-container)
- [Define different retention schedules](#define-different-retention-schedules)
- [Use special characters in notification URLs](#use-special-characters-in-notification-urls)
- [Handle file uploads using third party tools](#handle-file-uploads-using-third-party-tools)
- [Recipes](#recipes)
- [Backing up to AWS S3](#backing-up-to-aws-s3)
- [Backing up to Filebase](#backing-up-to-filebase)
- [Backing up to MinIO](#backing-up-to-minio)
- [Backing up to MinIO \(using Docker secrets\)](#backing-up-to-minio-using-docker-secrets)
- [Backing up to WebDAV](#backing-up-to-webdav)
- [Backing up to SSH](#backing-up-to-ssh)
- [Backing up to Azure Blob Storage](#backing-up-to-azure-blob-storage)
- [Backing up locally](#backing-up-locally)
- [Backing up to AWS S3 as well as locally](#backing-up-to-aws-s3-as-well-as-locally)
- [Running on a custom cron schedule](#running-on-a-custom-cron-schedule)
@@ -122,18 +113,6 @@ docker run --rm \
Alternatively, pass a `--env-file` in order to use a full config as described below.
### Available image registries
This Docker image is published to both Docker Hub and the GitHub container registry.
Depending on your preferences and needs, you can reference both `offen/docker-volume-backup` as well as `ghcr.io/offen/docker-volume-backup`:
```
docker pull offen/docker-volume-backup:v2
docker pull ghcr.io/offen/docker-volume-backup:v2
```
Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
## Configuration reference
Backup targets, schedule and retention are configured in environment variables.
@@ -188,12 +167,6 @@ You can populate below template according to your requirements and use it as you
# BACKUP_SOURCES="/other/location"
# When given, all files in BACKUP_SOURCES whose full path matches the given
# regular expression will be excluded from the archive. Regular expressions
# are interpreted as in the Go standard library: https://pkg.go.dev/regexp
# BACKUP_EXCLUDE_REGEXP="\.log$"
########### BACKUP STORAGE
# The name of the remote bucket that should be used for storing backups. If
@@ -213,14 +186,6 @@ You can populate below template according to your requirements and use it as you
# AWS_ACCESS_KEY_ID="<xxx>"
# AWS_SECRET_ACCESS_KEY="<xxx>"
# It is possible to provide the keys in files, allowing you to hide the sensitive data.
# These values have a higher priority than the ones above, meaning if both are set
# the values from the files will be used.
# This option is most useful with Docker [secrets](https://docs.docker.com/engine/swarm/secrets/).
# AWS_ACCESS_KEY_ID_FILE="/path/to/file"
# AWS_SECRET_ACCESS_KEY_FILE="/path/to/file"
# Instead of providing static credentials, you can also use IAM instance profiles
# or similar to provide authentication. Some possible configuration options on AWS:
# - EC2: http://169.254.169.254
@@ -242,24 +207,12 @@ You can populate below template according to your requirements and use it as you
# AWS_ENDPOINT_PROTO="https"
# Setting this variable to `true` will disable verification of
# SSL certificates for AWS_ENDPOINT. You shouldn't use this unless you use
# self-signed certificates for your remote storage backend. This can only be
# used when AWS_ENDPOINT_PROTO is set to `https`.
# SSL certificates. You shouldn't use this unless you use self-signed
# certificates for your remote storage backend. This can only be used
# when AWS_ENDPOINT_PROTO is set to `https`.
# AWS_ENDPOINT_INSECURE="true"
# If you wish to use self-signed certificates for your S3 server, you can pass
# the location of a PEM encoded CA certificate and it will be used for
# validating your certificates.
# Alternatively, pass a PEM encoded string containing the certificate.
# AWS_ENDPOINT_CA_CERT="/path/to/cert.pem"
# Setting this variable will change the S3 storage class header.
# Defaults to "STANDARD", you can set this value according to your needs.
# AWS_STORAGE_CLASS="GLACIER"
# You can also backup files to any WebDAV server:
# The URL of the remote WebDAV server
@@ -279,65 +232,6 @@ You can populate below template according to your requirements and use it as you
# WEBDAV_PASSWORD="password"
# Setting this variable to `true` will disable verification of
# SSL certificates for WEBDAV_URL. You shouldn't use this unless you use
# self-signed certificates for your remote storage backend.
# WEBDAV_URL_INSECURE="true"
# You can also backup files to any SSH server:
# The URL of the remote SSH server
# SSH_HOST_NAME="server.local"
# The port of the remote SSH server
# Optional variable default value is `22`
# SSH_PORT=2222
# The directory to place the backups in on the SSH server.
# SSH_REMOTE_PATH="/my/directory/"
# The username for the SSH server
# SSH_USER="user"
# The password for the SSH server
# SSH_PASSWORD="password"
# The private key path in container for SSH server
# Default value: /root/.ssh/id_rsa
# If file is mounted to /root/.ssh/id_rsa path it will be used. Non-RSA keys will
# also work.
# SSH_IDENTITY_FILE="/root/.ssh/id_rsa"
# The passphrase for the identity file
# SSH_IDENTITY_PASSPHRASE="pass"
# The credential's account name when using Azure Blob Storage. This has to be
# set when using Azure Blob Storage.
# AZURE_STORAGE_ACCOUNT_NAME="account-name"
# The credential's primary account key when using Azure Blob Storage. If this
# is not given, the command tries to fall back to using a managed identity.
# AZURE_STORAGE_PRIMARY_ACCOUNT_KEY="<xxx>"
# The container name when using Azure Blob Storage.
# AZURE_STORAGE_CONTAINER_NAME="container-name"
# The service endpoint when using Azure Blob Storage. This is a template that
# can be passed the account name as shown in the default value below.
# AZURE_STORAGE_ENDPOINT="https://{{ .AccountName }}.blob.core.windows.net/"
# In addition to storing backups remotely, you can also keep local copies.
# Pass a container-local path to store your backups if needed. You also need to
# mount a local folder or Docker volume into that location (`/archive`
@@ -403,7 +297,7 @@ You can populate below template according to your requirements and use it as you
# It is possible to define commands to be run in any container before and after
# a backup is conducted. The commands themselves are defined in labels like
# `docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump [options] > dump.sql'.
# `docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump [options] > dump.sql'.
# Several options exist for controlling this feature:
# By default, any output of such a command is suppressed. If this value
@@ -424,7 +318,7 @@ You can populate below template according to your requirements and use it as you
# Notifications (email, Slack, etc.) can be sent out when a backup run finishes.
# Configuration is provided as a comma-separated list of URLs as consumed
# by `shoutrrr`: https://containrrr.dev/shoutrrr/0.7/services/overview/
# by `shoutrrr`: https://containrrr.dev/shoutrrr/v0.5/services/overview/
# The content of such notifications can be customized. Dedicated documentation
# on how to do this can be found in the README. When providing multiple URLs or
# a URL that contains a comma, the values can be URL encoded to avoid ambiguities.
@@ -457,6 +351,19 @@ You can populate below template according to your requirements and use it as you
# LOCK_TIMEOUT="60m"
########### HEADER FORMAT USED BY THE TAR ARCHIVE
# By default, tar archive creation will pick a header format that is appropriate
# for the archive's contents. In case you have special requirements or need to
# work with tools that do not support all standard header formats, you can use
# this option to enforce a certain header format. Valid options are "USTAR",
# "PAX" and "GNU". Be aware that setting this value might create situations where
# it's not possible to encode the information about a certain file, making the
# backup fail.
# In case no value is set, an appropriate format will be selected for each file.
# TAR_ARCHIVE_HEADER_FORMAT="USTAR"
########### EMAIL NOTIFICATIONS
# ************************************************************************
@@ -488,7 +395,7 @@ You can populate below template according to your requirements and use it as you
# EMAIL_SMTP_PORT="<port>"
```
In case you encounter double quoted values in your configuration, you might be running an [older version of `docker-compose`][compose-issue].
In case you encounter double quoted values in your configuration, you might be running an [older version of `docker-compose`].
You can work around this by either updating `docker-compose` or unquoting your configuration values.
[compose-issue]: https://github.com/docker/compose/issues/2854
@@ -568,7 +475,7 @@ services:
Notification backends other than email are also supported.
Refer to the documentation of [shoutrrr][shoutrrr-docs] to find out about options and configuration.
[shoutrrr-docs]: https://containrrr.dev/shoutrrr/0.7/services/overview/
[shoutrrr-docs]: https://containrrr.dev/shoutrrr/v0.5/services/overview/
### Customize notifications
@@ -595,16 +502,11 @@ Overridable template names are: `title_success`, `body_success`, `title_failure`
For a full list of available variables and functions, see [this page](https://github.com/offen/docker-volume-backup/blob/master/docs/NOTIFICATION-TEMPLATES.md).
### Run custom commands during the backup lifecycle
### Run custom commands before / after backup
In certain scenarios it can be required to run specific commands before and after a backup is taken (e.g. dumping a database).
When mounting the Docker socket into the `docker-volume-backup` container, you can define pre- and post-commands that will be run in the context of the target container (it is also possible to run commands inside the `docker-volume-backup` container itself using this feature).
Such commands are defined by specifying the command in a `docker-volume-backup.[step]-[pre|post]` label where `step` can be any of the following phases of a backup lifecycle:
- `archive` (the tar archive is created)
- `process` (the tar archive is processed, e.g. encrypted - optional)
- `copy` (the tar archive is copied to all configured storages)
- `prune` (existing backups are pruned based on the defined ruleset - optional)
When mounting the Docker socket into the `docker-volume-backup` container, you can define pre- and post-commands that will be run in the context of the target container.
Such commands are defined by specifying the command in a `docker-volume-backup.exec-[pre|post]` label.
Taking a database dump using `mysqldump` would look like this:
@@ -618,7 +520,7 @@ services:
volumes:
- backup_data:/tmp/backups
labels:
- docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump --all-databases > /backups/dump.sql'
- docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump --all-databases > /backups/dump.sql'
volumes:
backup_data:
@@ -638,7 +540,7 @@ services:
volumes:
- backup_data:/tmp/backups
labels:
- docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump --all-databases > /tmp/volume/dump.sql'
- docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump --all-databases > /tmp/volume/dump.sql'
- docker-volume-backup.exec-label=database
backup:
@@ -654,26 +556,9 @@ volumes:
```
The backup procedure is guaranteed to wait for all `pre` or `post` commands to finish before proceeding.
The backup procedure is guaranteed to wait for all `pre` commands to finish.
However, there are no guarantees about the order in which they are run; they may also run concurrently.
By default the backup command is executed by the root user. It is possible to specify a custom user that is used to run commands in dedicated labels with the format `docker-volume-backup.[step]-[pre|post].user`:
```yml
version: '3'
services:
gitea:
image: gitea/gitea
volumes:
- backup_data:/tmp
labels:
- docker-volume-backup.archive-pre.user=git
- docker-volume-backup.archive-pre=/bin/bash -c 'cd /tmp; /usr/local/bin/gitea dump -c /data/gitea/conf/app.ini -R -f dump.zip'
```
Make sure the user exists and is present in `passwd` inside the target container.
### Encrypting your backup using GPG
The image supports encrypting backups using GPG out of the box.
@@ -797,7 +682,7 @@ NOTIFICATION_URLS=smtp://me:secret@posteo.de:587/?fromAddress=no-reply@example.c
### Replace deprecated `BACKUP_FROM_SNAPSHOT` usage
Starting with version 2.15.0, the `BACKUP_FROM_SNAPSHOT` feature has been deprecated.
If you need to prepare your sources before the backup is taken, use `archive-pre`, `archive-post` and an intermediate volume:
If you need to prepare your sources before the backup is taken, use `exec-pre`, `exec-post` and an intermediate volume:
```yml
version: '3'
@@ -809,11 +694,11 @@ services:
- data:/var/my_app
- backup:/tmp/backup
labels:
- docker-volume-backup.archive-pre=cp -r /var/my_app /tmp/backup/my-app
- docker-volume-backup.archive-post=rm -rf /tmp/backup/my-app
- docker-volume-backup.exec-pre=cp -r /var/my_app /tmp/backup/my-app
- docker-volume-backup.exec-post=rm -rf /tmp/backup/my-app
backup:
image: offen/docker-volume-backup:v2
image: offen/docker-volume-backup:latest
environment:
BACKUP_SOURCES: /tmp/backup
volumes:
@@ -825,23 +710,6 @@ volumes:
backup:
```
### Replace deprecated `exec-pre` and `exec-post` labels
Version 2.19.0 introduced the option to run labeled commands at multiple points in time during the backup lifecycle.
In order to be able to use more obvious terminology in the new labels, the existing `exec-pre` and `exec-post` labels have been deprecated.
If you want to emulate the existing behavior, all you need to do is change `exec-pre` to `archive-pre` and `exec-post` to `archive-post`:
```diff
labels:
- - docker-volume-backup.exec-pre=cp -r /var/my_app /tmp/backup/my-app
+ - docker-volume-backup.archive-pre=cp -r /var/my_app /tmp/backup/my-app
- - docker-volume-backup.exec-post=rm -rf /tmp/backup/my-app
+ - docker-volume-backup.archive-post=rm -rf /tmp/backup/my-app
```
The `EXEC_LABEL` setting and the `docker-volume-backup.exec-label` label stay as is.
Check the additional documentation on running commands during the backup lifecycle to find out about further possibilities.
### Using a custom Docker host
If you are interfacing with Docker via TCP, set `DOCKER_HOST` to the correct URL.
@@ -851,23 +719,6 @@ DOCKER_HOST=tcp://docker_socket_proxy:2375
In case you are using a socket proxy, it must support `GET` and `POST` requests to the `/containers` endpoint. If you are using Docker Swarm, it must also support the `/services` endpoint. If you are using pre/post backup commands, it must also support the `/exec` endpoint.
### Use with rootless Docker
It's also possible to use this image with a [rootless Docker installation][rootless-docker].
Instead of mounting `/var/run/docker.sock`, mount the user-specific socket into the container:
```yml
services:
backup:
image: offen/docker-volume-backup:v2
# ... configuration omitted
volumes:
- backup:/backup:ro
- /run/user/1000/docker.sock:/var/run/docker.sock:ro
```
[rootless-docker]: https://docs.docker.com/engine/security/rootless/
### Run multiple backup schedules in the same container
Multiple backup schedules with different configuration can be configured by mounting an arbitrary number of configuration files (using the `.env` format) into `/etc/dockervolumebackup/conf.d`:
@@ -895,112 +746,6 @@ The exact order of schedules that use the same cron expression is not specified.
In case you need your schedules to overlap, you need to create a dedicated container for each schedule instead.
When changing the configuration, you currently need to manually restart the container for the changes to take effect.
Set `BACKUP_SOURCES` for each config file to control which subset of volume mounts gets backed up:
```yml
# With a volume configuration like this:
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- ./configuration:/etc/dockervolumebackup/conf.d
- app1_data:/backup/app1_data:ro
- app2_data:/backup/app2_data:ro
```
```ini
# In the 1st config file:
BACKUP_SOURCES=/backup/app1_data
# In the 2nd config file:
BACKUP_SOURCES=/backup/app2_data
```
### Define different retention schedules
If you want to manage backup retention on different schedules, the most straightforward approach is to define a dedicated configuration per retention rule using a different prefix in the `BACKUP_FILENAME` parameter and then run them on different cron schedules.
For example, if you wanted to keep daily backups for 7 days, weekly backups for a month, and retain monthly backups forever, you could create three configuration files and mount them into `/etc/dockervolumebackup.d`:
```ini
# 01daily.conf
BACKUP_FILENAME="daily-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
# run every day at 2am
BACKUP_CRON_EXPRESSION="0 2 * * *"
BACKUP_PRUNING_PREFIX="daily-backup-"
BACKUP_RETENTION_DAYS="7"
```
```ini
# 02weekly.conf
BACKUP_FILENAME="weekly-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
# run every monday at 3am
BACKUP_CRON_EXPRESSION="0 3 * * 1"
BACKUP_PRUNING_PREFIX="weekly-backup-"
BACKUP_RETENTION_DAYS="31"
```
```ini
# 03monthly.conf
BACKUP_FILENAME="monthly-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
# run every 1st of a month at 4am
BACKUP_CRON_EXPRESSION="0 4 1 * *"
```
Note that while it's possible to define colliding cron schedules for each of these configurations, you might need to adjust the value for `LOCK_TIMEOUT` in case your backups are large and might take longer than an hour.
### Use special characters in notification URLs
The value given to `NOTIFICATION_URLS` is a comma separated list of URLs.
If such a URL contains special characters (e.g. commas) it needs to be URL encoded.
To get an encoded version of your URL, you can use the CLI tool provided by `shoutrrr` (which is the library used for sending notifications):
```
docker run --rm -ti containrrr/shoutrrr generate [service]
```
where service is any of the [supported services][shoutrrr-docs], e.g. for SMTP:
```
docker run --rm -ti containrrr/shoutrrr generate smtp
```
### Handle file uploads using third party tools
If you want to use a non-supported storage backend, or want to use a third party (e.g. rsync, rclone) tool for file uploads, you can build a Docker image containing the required binaries off this one, and call through to these in lifecycle hooks.
For example, if you wanted to use `rsync`, define your Docker image like this:
```Dockerfile
FROM offen/docker-volume-backup:v2
RUN apk add rsync
```
Using this image, you can now omit configuring any of the supported storage backends, and instead define your own mechanism in a `docker-volume-backup.copy-post` label:
```yml
version: '3'
services:
backup:
image: your-custom-image
restart: always
environment:
BACKUP_FILENAME: "daily-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
BACKUP_CRON_EXPRESSION: "0 2 * * *"
labels:
- docker-volume-backup.copy-post=/bin/sh -c 'rsync $$COMMAND_RUNTIME_ARCHIVE_FILEPATH /destination'
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
# other services defined here ...
volumes:
app_data:
```
Commands will be invoked with the filepath of the tar archive passed as `COMMAND_RUNTIME_ARCHIVE_FILEPATH`.
## Recipes
This section lists configuration for some real-world use cases that you can mix and match according to your needs.
@@ -1070,38 +815,6 @@ volumes:
data:
```
### Backing up to MinIO (using Docker secrets)
```yml
version: '3'
services:
# ... define other services using the `data` volume here
backup:
image: offen/docker-volume-backup:v2
environment:
AWS_ENDPOINT: minio.example.com
AWS_S3_BUCKET_NAME: backup-bucket
AWS_ACCESS_KEY_ID_FILE: /run/secrets/minio_access_key
AWS_SECRET_ACCESS_KEY_FILE: /run/secrets/minio_secret_key
volumes:
- data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
secrets:
- minio_access_key
- minio_secret_key
volumes:
data:
secrets:
minio_access_key:
# ... define how secret is accessed
minio_secret_key:
# ... define how secret is accessed
```
### Backing up to WebDAV
```yml
@@ -1124,50 +837,6 @@ volumes:
data:
```
### Backing up to SSH
```yml
version: '3'
services:
# ... define other services using the `data` volume here
backup:
image: offen/docker-volume-backup:v2
environment:
SSH_HOST_NAME: server.local
SSH_PORT: 2222
SSH_USER: user
SSH_REMOTE_PATH: /data
volumes:
- data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- /path/to/private_key:/root/.ssh/id_rsa
volumes:
data:
```
### Backing up to Azure Blob Storage
```yml
version: '3'
services:
# ... define other services using the `data` volume here
backup:
image: offen/docker-volume-backup:v2
environment:
AZURE_STORAGE_CONTAINER_NAME: backup-container
AZURE_STORAGE_ACCOUNT_NAME: account-name
AZURE_STORAGE_PRIMARY_ACCOUNT_KEY: Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
volumes:
- data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
volumes:
data:
```
### Backing up locally
```yml
@@ -1289,9 +958,9 @@ services:
database:
image: mariadb:latest
labels:
- docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump -psecret --all-databases > /tmp/dumps/dump.sql'
- docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump -psecret --all-databases > /tmp/dumps/dump.sql'
volumes:
- data:/tmp/dumps
- app_data:/tmp/dumps
backup:
image: offen/docker-volume-backup:v2
environment:

View File

@@ -11,13 +11,14 @@ import (
"compress/gzip"
"fmt"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"strings"
)
func createArchive(files []string, inputFilePath, outputFilePath string) error {
func createArchive(inputFilePath, outputFilePath string, options createArchiveOptions) error {
inputFilePath = stripTrailingSlashes(inputFilePath)
inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
if err != nil {
@@ -27,7 +28,7 @@ func createArchive(files []string, inputFilePath, outputFilePath string) error {
return fmt.Errorf("createArchive: error creating output file path: %w", err)
}
if err := compress(files, outputFilePath, filepath.Dir(inputFilePath)); err != nil {
if err := compress(inputFilePath, outputFilePath, filepath.Dir(inputFilePath), options); err != nil {
return fmt.Errorf("createArchive: error creating archive: %w", err)
}
@@ -51,7 +52,7 @@ func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error)
return inputFilePath, outputFilePath, err
}
func compress(paths []string, outFilePath, subPath string) error {
func compress(inPath, outFilePath, subPath string, options createArchiveOptions) error {
file, err := os.Create(outFilePath)
if err != nil {
return fmt.Errorf("compress: error creating out file: %w", err)
@@ -61,9 +62,17 @@ func compress(paths []string, outFilePath, subPath string) error {
gzipWriter := gzip.NewWriter(file)
tarWriter := tar.NewWriter(gzipWriter)
var paths []string
if err := filepath.WalkDir(inPath, func(path string, di fs.DirEntry, err error) error {
paths = append(paths, path)
return err
}); err != nil {
return fmt.Errorf("compress: error walking filesystem tree: %w", err)
}
for _, p := range paths {
if err := writeTarGz(p, tarWriter, prefix); err != nil {
return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
if err := writeTarGz(p, tarWriter, prefix, options.format); err != nil {
return fmt.Errorf("compress error writing %s to archive: %w", p, err)
}
}
@@ -85,7 +94,7 @@ func compress(paths []string, outFilePath, subPath string) error {
return nil
}
func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
func writeTarGz(path string, tarWriter *tar.Writer, prefix string, format tar.Format) error {
fileInfo, err := os.Lstat(path)
if err != nil {
return fmt.Errorf("writeTarGz: error getting file infor for %s: %w", path, err)
@@ -104,6 +113,10 @@ func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
}
header, err := tar.FileInfoHeader(fileInfo, link)
if err != nil {
return fmt.Errorf("writeTarGz: error getting file info header: %w", err)
}
if format >= 0 {
header.Format = format
}
@@ -131,3 +144,7 @@ func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
return nil
}
type createArchiveOptions struct {
format tar.Format
}
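
As an aside for readers following the new `options.format` plumbing: the sketch below (not part of the change set; file names are placeholders, gzip wrapping is omitted) shows what enforcing a header format on a single entry looks like with `archive/tar`, mirroring the `if format >= 0` check in `writeTarGz`.

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
)

// writeEntry writes a single file into the archive, enforcing the given header
// format when one is configured (a negative value means "let archive/tar pick").
func writeEntry(tw *tar.Writer, path string, format tar.Format) error {
	info, err := os.Lstat(path)
	if err != nil {
		return fmt.Errorf("lstat %s: %w", path, err)
	}
	header, err := tar.FileInfoHeader(info, "")
	if err != nil {
		return fmt.Errorf("building header for %s: %w", path, err)
	}
	if format >= 0 {
		header.Format = format // e.g. tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU
	}
	if err := tw.WriteHeader(header); err != nil {
		return err
	}
	if !info.Mode().IsRegular() {
		return nil
	}
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(tw, f)
	return err
}

func main() {
	out, err := os.Create("example.tar") // placeholder output file
	if err != nil {
		panic(err)
	}
	defer out.Close()
	tw := tar.NewWriter(out)
	defer tw.Close()
	// Forcing USTAR, as TAR_ARCHIVE_HEADER_FORMAT="USTAR" would.
	if err := writeEntry(tw, "example.txt", tar.FormatUSTAR); err != nil {
		panic(err)
	}
}
```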

View File

@@ -4,30 +4,14 @@
package main
import (
"crypto/x509"
"encoding/pem"
"archive/tar"
"fmt"
"io/ioutil"
"os"
"regexp"
"time"
)
// Config holds all configuration values that are expected to be set
// by users.
type Config struct {
AwsS3BucketName string `split_words:"true"`
AwsS3Path string `split_words:"true"`
AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"`
AwsEndpointProto string `split_words:"true" default:"https"`
AwsEndpointInsecure bool `split_words:"true"`
AwsEndpointCACert CertDecoder `envconfig:"AWS_ENDPOINT_CA_CERT"`
AwsStorageClass string `split_words:"true"`
AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
AwsAccessKeyIDFile string `envconfig:"AWS_ACCESS_KEY_ID_FILE"`
AwsSecretAccessKey string `split_words:"true"`
AwsSecretAccessKeyFile string `split_words:"true"`
AwsIamRoleEndpoint string `split_words:"true"`
BackupSources string `split_words:"true" default:"/backup"`
BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
BackupFilenameExpand bool `split_words:"true"`
@@ -38,7 +22,14 @@ type Config struct {
BackupPruningPrefix string `split_words:"true"`
BackupStopContainerLabel string `split_words:"true" default:"true"`
BackupFromSnapshot bool `split_words:"true"`
BackupExcludeRegexp RegexpDecoder `split_words:"true"`
AwsS3BucketName string `split_words:"true"`
AwsS3Path string `split_words:"true"`
AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"`
AwsEndpointProto string `split_words:"true" default:"https"`
AwsEndpointInsecure bool `split_words:"true"`
AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
AwsSecretAccessKey string `split_words:"true"`
AwsIamRoleEndpoint string `split_words:"true"`
GpgPassphrase string `split_words:"true"`
NotificationURLs []string `envconfig:"NOTIFICATION_URLS"`
NotificationLevel string `split_words:"true" default:"error"`
@@ -49,71 +40,36 @@ type Config struct {
EmailSMTPUsername string `envconfig:"EMAIL_SMTP_USERNAME"`
EmailSMTPPassword string `envconfig:"EMAIL_SMTP_PASSWORD"`
WebdavUrl string `split_words:"true"`
WebdavUrlInsecure bool `split_words:"true"`
WebdavPath string `split_words:"true" default:"/"`
WebdavUsername string `split_words:"true"`
WebdavPassword string `split_words:"true"`
SSHHostName string `split_words:"true"`
SSHPort string `split_words:"true" default:"22"`
SSHUser string `split_words:"true"`
SSHPassword string `split_words:"true"`
SSHIdentityFile string `split_words:"true" default:"/root/.ssh/id_rsa"`
SSHIdentityPassphrase string `split_words:"true"`
SSHRemotePath string `split_words:"true"`
ExecLabel string `split_words:"true"`
ExecForwardOutput bool `split_words:"true"`
LockTimeout time.Duration `split_words:"true" default:"60m"`
AzureStorageAccountName string `split_words:"true"`
AzureStoragePrimaryAccountKey string `split_words:"true"`
AzureStorageContainerName string `split_words:"true"`
AzureStoragePath string `split_words:"true"`
AzureStorageEndpoint string `split_words:"true" default:"https://{{ .AccountName }}.blob.core.windows.net/"`
TarArchiveHeaderFormat TarFormat `split_words:"true"`
}
func (c *Config) resolveSecret(envVar string, secretPath string) (string, error) {
if secretPath == "" {
return envVar, nil
}
data, err := os.ReadFile(secretPath)
if err != nil {
return "", fmt.Errorf("resolveSecret: error reading secret path: %w", err)
}
return string(data), nil
}
type TarFormat tar.Format
type CertDecoder struct {
Cert *x509.Certificate
}
func (c *CertDecoder) Decode(v string) error {
if v == "" {
func (t *TarFormat) Decode(value string) error {
switch value {
case "PAX":
*t = TarFormat(tar.FormatPAX)
return nil
}
content, err := ioutil.ReadFile(v)
if err != nil {
content = []byte(v)
}
block, _ := pem.Decode(content)
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return fmt.Errorf("config: error parsing certificate: %w", err)
}
*c = CertDecoder{Cert: cert}
case "USTAR":
*t = TarFormat(tar.FormatUSTAR)
return nil
case "GNU":
*t = TarFormat(tar.FormatGNU)
return nil
case "":
*t = TarFormat(-1)
return nil
default:
return fmt.Errorf("tarFormat: unknown format %s", value)
}
}
type RegexpDecoder struct {
Re *regexp.Regexp
}
func (r *RegexpDecoder) Decode(v string) error {
if v == "" {
return nil
}
re, err := regexp.Compile(v)
if err != nil {
return fmt.Errorf("config: error compiling given regexp `%s`: %w", v, err)
}
*r = RegexpDecoder{Re: re}
return nil
func (t *TarFormat) Format() tar.Format {
return tar.Format(*t)
}
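
Because `envconfig` hands the raw `TAR_ARCHIVE_HEADER_FORMAT` value to the `Decode` method above, the mapping can be pinned down with a test. The following is a hypothetical `_test.go` sketch assumed to live in the same package; it is not part of this change set.

```go
package main

import (
	"archive/tar"
	"testing"
)

// TestTarFormatDecode exercises the Decode mapping defined above, including
// the "unset" case that falls back to a negative sentinel value.
func TestTarFormatDecode(t *testing.T) {
	cases := map[string]tar.Format{
		"USTAR": tar.FormatUSTAR,
		"PAX":   tar.FormatPAX,
		"GNU":   tar.FormatGNU,
	}
	for value, want := range cases {
		var f TarFormat
		if err := f.Decode(value); err != nil {
			t.Fatalf("decoding %q: %v", value, err)
		}
		if got := f.Format(); got != want {
			t.Errorf("decoding %q: got %v, want %v", value, got, want)
		}
	}

	var unset TarFormat
	if err := unset.Decode(""); err != nil {
		t.Fatalf("decoding empty value: %v", err)
	}
	if unset.Format() >= 0 {
		t.Errorf("expected a negative sentinel for unset format, got %v", unset.Format())
	}

	var invalid TarFormat
	if invalid.Decode("V7") == nil {
		t.Error("expected an error for an unsupported format")
	}
}
```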

View File

@@ -21,17 +21,12 @@ import (
"golang.org/x/sync/errgroup"
)
func (s *script) exec(containerRef string, command string, user string) ([]byte, []byte, error) {
func (s *script) exec(containerRef string, command string) ([]byte, []byte, error) {
args, _ := argv.Argv(command, nil, nil)
commandEnv := []string{
fmt.Sprintf("COMMAND_RUNTIME_ARCHIVE_FILEPATH=%s", s.file),
}
execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, types.ExecConfig{
Cmd: args[0],
AttachStdin: true,
AttachStderr: true,
Env: commandEnv,
User: user,
})
if err != nil {
return nil, nil, fmt.Errorf("exec: error creating container exec: %w", err)
@@ -98,73 +93,18 @@ func (s *script) runLabeledCommands(label string) error {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
var hasDeprecatedContainers bool
if label == "docker-volume-backup.archive-pre" {
f[0] = filters.KeyValuePair{
Key: "label",
Value: "docker-volume-backup.exec-pre",
}
deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
Filters: filters.NewArgs(f...),
})
if err != nil {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
if len(deprecatedContainers) != 0 {
hasDeprecatedContainers = true
containersWithCommand = append(containersWithCommand, deprecatedContainers...)
}
}
if label == "docker-volume-backup.archive-post" {
f[0] = filters.KeyValuePair{
Key: "label",
Value: "docker-volume-backup.exec-post",
}
deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
Filters: filters.NewArgs(f...),
})
if err != nil {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
if len(deprecatedContainers) != 0 {
hasDeprecatedContainers = true
containersWithCommand = append(containersWithCommand, deprecatedContainers...)
}
}
if len(containersWithCommand) == 0 {
return nil
}
if hasDeprecatedContainers {
s.logger.Warn(
"Using `docker-volume-backup.exec-pre` and `docker-volume-backup.exec-post` labels has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use other `-pre` and `-post` labels instead. Refer to the README for an upgrade guide.",
)
}
g := new(errgroup.Group)
for _, container := range containersWithCommand {
c := container
g.Go(func() error {
cmd, ok := c.Labels[label]
if !ok && label == "docker-volume-backup.archive-pre" {
cmd, _ = c.Labels["docker-volume-backup.exec-pre"]
} else if !ok && label == "docker-volume-backup.archive-post" {
cmd, _ = c.Labels["docker-volume-backup.exec-post"]
}
userLabelName := fmt.Sprintf("%s.user", label)
user := c.Labels[userLabelName]
cmd, _ := c.Labels[label]
s.logger.Infof("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/"))
stdout, stderr, err := s.exec(c.ID, cmd, user)
stdout, stderr, err := s.exec(c.ID, cmd)
if s.c.ExecForwardOutput {
os.Stderr.Write(stderr)
os.Stdout.Write(stdout)
@@ -181,27 +121,3 @@ func (s *script) runLabeledCommands(label string) error {
}
return nil
}
type lifecyclePhase string
const (
lifecyclePhaseArchive lifecyclePhase = "archive"
lifecyclePhaseProcess lifecyclePhase = "process"
lifecyclePhaseCopy lifecyclePhase = "copy"
lifecyclePhasePrune lifecyclePhase = "prune"
)
func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func() error {
if s.cli == nil {
return cb
}
return func() error {
if err := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
return fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
}
defer func() {
s.must(s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)))
}()
return cb()
}
}
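
For context on the Docker SDK calls used by `exec`: the sketch below (illustrative only; the `argv` parsing and output demuxing are omitted, and the function name is a placeholder) shows the `ContainerExecCreate` / `ContainerExecAttach` pattern the function relies on.

```go
package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// runInContainer creates an exec instance in the target container and attaches
// to it, following the same pattern as the exec function above. Reading the
// attached stdout/stderr streams is left out for brevity.
func runInContainer(cli *client.Client, containerID string, cmd []string, env []string) error {
	execID, err := cli.ContainerExecCreate(context.Background(), containerID, types.ExecConfig{
		Cmd:          cmd,
		AttachStdout: true,
		AttachStderr: true,
		Env:          env,
	})
	if err != nil {
		return fmt.Errorf("creating exec: %w", err)
	}
	resp, err := cli.ContainerExecAttach(context.Background(), execID.ID, types.ExecStartCheck{})
	if err != nil {
		return fmt.Errorf("attaching to exec: %w", err)
	}
	defer resp.Close()
	return nil
}
```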

View File

@@ -4,7 +4,6 @@
package main
import (
"errors"
"fmt"
"sort"
)
@@ -51,7 +50,7 @@ func (s *script) runHooks(err error) error {
}
}
if len(actionErrors) != 0 {
return errors.Join(actionErrors...)
return join(actionErrors...)
}
return nil
}
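
One side of this hunk uses `errors.Join` (available from Go 1.20), the other a project-local `join` helper whose implementation is not shown in this diff. Purely as a sketch, such a helper could look like this:

```go
package main

import (
	"errors"
	"strings"
)

// join flattens multiple errors into one. This is only an illustrative
// stand-in for the helper referenced above, not necessarily what ships in
// the project's main package.
func join(errs ...error) error {
	if len(errs) == 1 {
		return errs[0]
	}
	var msgs []string
	for _, err := range errs {
		if err == nil {
			continue
		}
		msgs = append(msgs, err.Error())
	}
	if len(msgs) == 0 {
		return nil
	}
	return errors.New(strings.Join(msgs, ", "))
}
```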

View File

@@ -31,7 +31,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
for {
acquired, err := fileLock.TryLock()
if err != nil {
return noop, fmt.Errorf("lock: error trying to lock: %w", err)
return noop, fmt.Errorf("lock: error trying lock: %w", err)
}
if acquired {
if s.encounteredLock {
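
The retry loop above is what `LOCK_TIMEOUT` ultimately bounds. A standalone sketch of the same pattern, assuming `github.com/gofrs/flock` as the lock implementation (the `TryLock()` call matches its API, but the import is not visible in this hunk; the poll interval is illustrative):

```go
package example

import (
	"errors"
	"time"

	"github.com/gofrs/flock"
)

// acquire polls the lockfile until it is acquired or the timeout expires,
// mirroring the loop in lock().
func acquire(lockfile string, timeout time.Duration) (*flock.Flock, error) {
	fileLock := flock.New(lockfile)
	deadline := time.Now().Add(timeout)
	for {
		acquired, err := fileLock.TryLock()
		if err != nil {
			return nil, err
		}
		if acquired {
			return fileLock, nil
		}
		if time.Now().After(deadline) {
			return nil, errors.New("timed out waiting for lockfile")
		}
		time.Sleep(time.Second)
	}
}
```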

View File

@@ -38,7 +38,14 @@ func main() {
s.logger.Info("Finished running backup tasks.")
}()
s.must(s.withLabeledCommands(lifecyclePhaseArchive, func() error {
s.must(func() error {
runPostCommands, err := s.runCommands()
defer func() {
s.must(runPostCommands())
}()
if err != nil {
return err
}
restartContainers, err := s.stopContainers()
// The mechanism for restarting containers is not using hooks as it
// should happen as soon as possible (i.e. before uploading backups or
@@ -49,10 +56,10 @@ func main() {
if err != nil {
return err
}
return s.createArchive()
})())
return s.takeBackup()
}())
s.must(s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)())
s.must(s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)())
s.must(s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)())
s.must(s.encryptBackup())
s.must(s.copyBackup())
s.must(s.pruneBackups())
}

View File

@@ -6,9 +6,7 @@ package main
import (
"bytes"
_ "embed"
"errors"
"fmt"
"os"
"text/template"
"time"
@@ -36,16 +34,16 @@ func (s *script) notify(titleTemplate string, bodyTemplate string, err error) er
titleBuf := &bytes.Buffer{}
if err := s.template.ExecuteTemplate(titleBuf, titleTemplate, params); err != nil {
return fmt.Errorf("notify: error executing %s template: %w", titleTemplate, err)
return fmt.Errorf("notifyFailure: error executing %s template: %w", titleTemplate, err)
}
bodyBuf := &bytes.Buffer{}
if err := s.template.ExecuteTemplate(bodyBuf, bodyTemplate, params); err != nil {
return fmt.Errorf("notify: error executing %s template: %w", bodyTemplate, err)
return fmt.Errorf("notifyFailure: error executing %s template: %w", bodyTemplate, err)
}
if err := s.sendNotification(titleBuf.String(), bodyBuf.String()); err != nil {
return fmt.Errorf("notify: error notifying: %w", err)
return fmt.Errorf("notifyFailure: error notifying: %w", err)
}
return nil
}
@@ -69,7 +67,7 @@ func (s *script) sendNotification(title, body string) error {
}
}
if len(errs) != 0 {
return fmt.Errorf("sendNotification: error sending message: %w", errors.Join(errs...))
return fmt.Errorf("sendNotification: error sending message: %w", join(errs...))
}
return nil
}
@@ -84,7 +82,6 @@ var templateHelpers = template.FuncMap{
"formatBytesBin": func(bytes uint64) string {
return formatBytes(bytes, false)
},
"env": os.Getenv,
}
// formatBytes converts an amount of bytes in a human-readable representation
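
For context on `sendNotification`: the sender is a `shoutrrr` `ServiceRouter` built from the configured `NOTIFICATION_URLS`. A minimal standalone sketch of that API (the SMTP URL and credentials are placeholders):

```go
package main

import (
	"log"

	"github.com/containrrr/shoutrrr"
)

func main() {
	// CreateSender returns a *router.ServiceRouter, the same type the script
	// stores in its sender field.
	sender, err := shoutrrr.CreateSender("smtp://user:secret@mail.example.com:587/?fromAddress=from@example.com&toAddresses=to@example.com")
	if err != nil {
		log.Fatal(err)
	}
	// Send fans the message out to every configured service URL and returns
	// one error slot per URL; nil entries mean success.
	if errs := sender.Send("docker-volume-backup: backup run finished", nil); len(errs) > 0 {
		for _, err := range errs {
			if err != nil {
				log.Println(err)
			}
		}
	}
}
```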

View File

@@ -12,16 +12,10 @@ import (
"os"
"path"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/offen/docker-volume-backup/internal/storage/azure"
"github.com/offen/docker-volume-backup/internal/storage/local"
"github.com/offen/docker-volume-backup/internal/storage/s3"
"github.com/offen/docker-volume-backup/internal/storage/ssh"
"github.com/offen/docker-volume-backup/internal/storage/webdav"
"github.com/containrrr/shoutrrr"
"github.com/containrrr/shoutrrr/pkg/router"
"github.com/docker/docker/api/types"
@@ -30,17 +24,20 @@ import (
"github.com/docker/docker/client"
"github.com/kelseyhightower/envconfig"
"github.com/leekchan/timeutil"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/otiai10/copy"
"github.com/sirupsen/logrus"
"github.com/studio-b12/gowebdav"
"golang.org/x/crypto/openpgp"
"golang.org/x/sync/errgroup"
)
// script holds all the stateful information required to orchestrate a
// single backup run.
type script struct {
cli *client.Client
storages []storage.Backend
minioClient *minio.Client
webdavClient *gowebdav.Client
logger *logrus.Logger
sender *router.ServiceRouter
template *template.Template
@@ -72,13 +69,7 @@ func newScript() (*script, error) {
stats: &Stats{
StartTime: time.Now(),
LogOutput: logBuffer,
Storages: map[string]StorageStats{
"S3": {},
"WebDAV": {},
"SSH": {},
"Local": {},
"Azure": {},
},
Storages: StoragesStats{},
},
}
@@ -110,102 +101,54 @@ func newScript() (*script, error) {
s.cli = cli
}
logFunc := func(logType storage.LogLevel, context string, msg string, params ...any) {
switch logType {
case storage.LogLevelWarning:
s.logger.Warnf("["+context+"] "+msg, params...)
case storage.LogLevelError:
s.logger.Errorf("["+context+"] "+msg, params...)
case storage.LogLevelInfo:
default:
s.logger.Infof("["+context+"] "+msg, params...)
}
if s.c.AwsS3BucketName != "" {
var creds *credentials.Credentials
if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" {
creds = credentials.NewStaticV4(
s.c.AwsAccessKeyID,
s.c.AwsSecretAccessKey,
"",
)
} else if s.c.AwsIamRoleEndpoint != "" {
creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint)
} else {
return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
}
if s.c.AwsS3BucketName != "" {
accessKeyID, err := s.c.resolveSecret(s.c.AwsAccessKeyID, s.c.AwsAccessKeyIDFile)
options := minio.Options{
Creds: creds,
Secure: s.c.AwsEndpointProto == "https",
}
if s.c.AwsEndpointInsecure {
if !options.Secure {
return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
}
transport, err := minio.DefaultTransport(true)
if err != nil {
return nil, fmt.Errorf("newScript: error resolving AwsAccessKeyID: %w", err)
return nil, fmt.Errorf("newScript: failed to create default minio transport")
}
secretAccessKey, err := s.c.resolveSecret(s.c.AwsSecretAccessKey, s.c.AwsSecretAccessKeyFile)
transport.TLSClientConfig.InsecureSkipVerify = true
options.Transport = transport
}
mc, err := minio.New(s.c.AwsEndpoint, &options)
if err != nil {
return nil, fmt.Errorf("newScript: error resolving AwsSecretAccessKey: %w", err)
}
s3Config := s3.Config{
Endpoint: s.c.AwsEndpoint,
AccessKeyID: accessKeyID,
SecretAccessKey: secretAccessKey,
IamRoleEndpoint: s.c.AwsIamRoleEndpoint,
EndpointProto: s.c.AwsEndpointProto,
EndpointInsecure: s.c.AwsEndpointInsecure,
RemotePath: s.c.AwsS3Path,
BucketName: s.c.AwsS3BucketName,
StorageClass: s.c.AwsStorageClass,
CACert: s.c.AwsEndpointCACert.Cert,
}
if s3Backend, err := s3.NewStorageBackend(s3Config, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, s3Backend)
return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
}
s.minioClient = mc
}
if s.c.WebdavUrl != "" {
webDavConfig := webdav.Config{
URL: s.c.WebdavUrl,
URLInsecure: s.c.WebdavUrlInsecure,
Username: s.c.WebdavUsername,
Password: s.c.WebdavPassword,
RemotePath: s.c.WebdavPath,
}
if webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc); err != nil {
return nil, err
if s.c.WebdavUsername == "" || s.c.WebdavPassword == "" {
return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
} else {
s.storages = append(s.storages, webdavBackend)
webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword)
s.webdavClient = webdavClient
}
}
if s.c.SSHHostName != "" {
sshConfig := ssh.Config{
HostName: s.c.SSHHostName,
Port: s.c.SSHPort,
User: s.c.SSHUser,
Password: s.c.SSHPassword,
IdentityFile: s.c.SSHIdentityFile,
IdentityPassphrase: s.c.SSHIdentityPassphrase,
RemotePath: s.c.SSHRemotePath,
}
if sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, sshBackend)
}
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
localConfig := local.Config{
ArchivePath: s.c.BackupArchive,
LatestSymlink: s.c.BackupLatestSymlink,
}
localBackend := local.NewStorageBackend(localConfig, logFunc)
s.storages = append(s.storages, localBackend)
}
if s.c.AzureStorageAccountName != "" {
azureConfig := azure.Config{
ContainerName: s.c.AzureStorageContainerName,
AccountName: s.c.AzureStorageAccountName,
PrimaryAccountKey: s.c.AzureStoragePrimaryAccountKey,
Endpoint: s.c.AzureStorageEndpoint,
RemotePath: s.c.AzureStoragePath,
}
azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
if err != nil {
return nil, err
}
s.storages = append(s.storages, azureBackend)
}
if s.c.EmailNotificationRecipient != "" {
emailURL := fmt.Sprintf(
"smtp://%s:%s@%s:%d/?from=%s&to=%s",
@@ -272,6 +215,22 @@ func newScript() (*script, error) {
return s, nil
}
func (s *script) runCommands() (func() error, error) {
if s.cli == nil {
return noop, nil
}
if err := s.runLabeledCommands("docker-volume-backup.exec-pre"); err != nil {
return noop, fmt.Errorf("runCommands: error running pre commands: %w", err)
}
return func() error {
if err := s.runLabeledCommands("docker-volume-backup.exec-post"); err != nil {
return fmt.Errorf("runCommands: error running post commands: %w", err)
}
return nil
}, nil
}
// stopContainers stops all Docker containers that are marked as to being
// stopped during the backup and returns a function that can be called to
// restart everything that has been stopped.
@@ -284,7 +243,7 @@ func (s *script) stopContainers() (func() error, error) {
Quiet: true,
})
if err != nil {
return noop, fmt.Errorf("stopContainers: error querying for containers: %w", err)
return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
}
containerLabel := fmt.Sprintf(
@@ -300,7 +259,7 @@ func (s *script) stopContainers() (func() error, error) {
})
if err != nil {
return noop, fmt.Errorf("stopContainers: error querying for containers to stop: %w", err)
return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
}
if len(containersToStop) == 0 {
@@ -327,9 +286,9 @@ func (s *script) stopContainers() (func() error, error) {
var stopError error
if len(stopErrors) != 0 {
stopError = fmt.Errorf(
"stopContainers: %d error(s) stopping containers: %w",
"stopContainersAndRun: %d error(s) stopping containers: %w",
len(stopErrors),
errors.Join(stopErrors...),
join(stopErrors...),
)
}
@@ -364,9 +323,9 @@ func (s *script) stopContainers() (func() error, error) {
}
}
if serviceMatch.ID == "" {
return fmt.Errorf("stopContainers: couldn't find service with name %s", serviceName)
return fmt.Errorf("stopContainersAndRun: couldn't find service with name %s", serviceName)
}
serviceMatch.Spec.TaskTemplate.ForceUpdate += 1
serviceMatch.Spec.TaskTemplate.ForceUpdate = 1
if _, err := s.cli.ServiceUpdate(
context.Background(), serviceMatch.ID,
serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
@@ -378,9 +337,9 @@ func (s *script) stopContainers() (func() error, error) {
if len(restartErrors) != 0 {
return fmt.Errorf(
"stopContainers: %d error(s) restarting containers and services: %w",
"stopContainersAndRun: %d error(s) restarting containers and services: %w",
len(restartErrors),
errors.Join(restartErrors...),
join(restartErrors...),
)
}
s.logger.Infof(
@@ -391,9 +350,9 @@ func (s *script) stopContainers() (func() error, error) {
}, stopError
}
// createArchive creates a tar archive of the configured backup location and
// takeBackup creates a tar archive of the configured backup location and
// saves it to disk.
func (s *script) createArchive() error {
func (s *script) takeBackup() error {
backupSources := s.c.BackupSources
if s.c.BackupFromSnapshot {
@@ -401,13 +360,13 @@ func (s *script) createArchive() error {
"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the README for an upgrade guide.",
"Please use `exec-pre` and `exec-post` commands to prepare your backup sources. Refer to the README for an upgrade guide.",
)
backupSources = filepath.Join("/tmp", s.c.BackupSources)
// copy before compressing to guard against a situation where the backup folder's contents are still growing.
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(backupSources); err != nil {
return fmt.Errorf("createArchive: error removing snapshot: %w", err)
return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
}
s.logger.Infof("Removed snapshot `%s`.", backupSources)
return nil
@@ -416,7 +375,7 @@ func (s *script) createArchive() error {
PreserveTimes: true,
PreserveOwner: true,
}); err != nil {
return fmt.Errorf("createArchive: error creating snapshot: %w", err)
return fmt.Errorf("takeBackup: error creating snapshot: %w", err)
}
s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
}
@@ -424,44 +383,25 @@ func (s *script) createArchive() error {
tarFile := s.file
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(tarFile); err != nil {
return fmt.Errorf("createArchive: error removing tar file: %w", err)
return fmt.Errorf("takeBackup: error removing tar file: %w", err)
}
s.logger.Infof("Removed tar file `%s`.", tarFile)
return nil
})
backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
if err != nil {
return fmt.Errorf("createArchive: error getting absolute path: %w", err)
}
var filesEligibleForBackup []string
if err := filepath.WalkDir(backupPath, func(path string, di fs.DirEntry, err error) error {
if err != nil {
return err
}
if s.c.BackupExcludeRegexp.Re != nil && s.c.BackupExcludeRegexp.Re.MatchString(path) {
return nil
}
filesEligibleForBackup = append(filesEligibleForBackup, path)
return nil
if err := createArchive(backupSources, tarFile, createArchiveOptions{
format: s.c.TarArchiveHeaderFormat.Format(),
}); err != nil {
return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
}
if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
}
s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
return nil
}
// encryptArchive encrypts the backup file using PGP and the configured passphrase.
// encryptBackup encrypts the backup file using PGP and the configured passphrase.
// In case no passphrase is given it returns early, leaving the backup file
// untouched.
func (s *script) encryptArchive() error {
func (s *script) encryptBackup() error {
if s.c.GpgPassphrase == "" {
return nil
}
@@ -469,35 +409,35 @@ func (s *script) encryptArchive() error {
gpgFile := fmt.Sprintf("%s.gpg", s.file)
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(gpgFile); err != nil {
return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
}
s.logger.Infof("Removed GPG file `%s`.", gpgFile)
return nil
})
outFile, err := os.Create(gpgFile)
if err != nil {
return fmt.Errorf("encryptArchive: error opening out file: %w", err)
}
defer outFile.Close()
if err != nil {
return fmt.Errorf("encryptBackup: error opening out file: %w", err)
}
_, name := path.Split(s.file)
dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
IsBinary: true,
FileName: name,
}, nil)
if err != nil {
return fmt.Errorf("encryptArchive: error encrypting backup file: %w", err)
}
defer dst.Close()
if err != nil {
return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
}
src, err := os.Open(s.file)
if err != nil {
return fmt.Errorf("encryptArchive: error opening backup file `%s`: %w", s.file, err)
return fmt.Errorf("encryptBackup: error opening backup file `%s`: %w", s.file, err)
}
if _, err := io.Copy(dst, src); err != nil {
return fmt.Errorf("encryptArchive: error writing ciphertext to file: %w", err)
return fmt.Errorf("encryptBackup: error writing ciphertext to file: %w", err)
}
s.file = gpgFile
@@ -505,12 +445,12 @@ func (s *script) encryptArchive() error {
return nil
}
// copyArchive makes sure the backup file is copied to both local and remote locations
// copyBackup makes sure the backup file is copied to both local and remote locations
// as per the given configuration.
func (s *script) copyArchive() error {
func (s *script) copyBackup() error {
_, name := path.Split(s.file)
if stat, err := os.Stat(s.file); err != nil {
return fmt.Errorf("copyArchive: unable to stat backup file: %w", err)
return fmt.Errorf("copyBackup: unable to stat backup file: %w", err)
} else {
size := stat.Size()
s.stats.BackupFile = BackupFileStats{
@@ -520,17 +460,45 @@ func (s *script) copyArchive() error {
}
}
eg := errgroup.Group{}
for _, backend := range s.storages {
b := backend
eg.Go(func() error {
return b.Copy(s.file)
})
if s.minioClient != nil {
if _, err := s.minioClient.FPutObject(context.Background(), s.c.AwsS3BucketName, filepath.Join(s.c.AwsS3Path, name), s.file, minio.PutObjectOptions{
ContentType: "application/tar+gzip",
}); err != nil {
return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
}
if err := eg.Wait(); err != nil {
return fmt.Errorf("copyArchive: error copying archive: %w", err)
s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName)
}
if s.webdavClient != nil {
bytes, err := os.ReadFile(s.file)
if err != nil {
return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
}
if err := s.webdavClient.MkdirAll(s.c.WebdavPath, 0644); err != nil {
return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", s.c.WebdavPath, err)
}
if err := s.webdavClient.Write(filepath.Join(s.c.WebdavPath, name), bytes, 0644); err != nil {
return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err)
}
s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath)
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil {
return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
}
s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive)
if s.c.BackupLatestSymlink != "" {
symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink)
if _, err := os.Lstat(symlink); err == nil {
os.Remove(symlink)
}
if err := os.Symlink(name, symlink); err != nil {
return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
}
s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink)
}
}
return nil
}
@@ -544,28 +512,177 @@ func (s *script) pruneBackups() error {
deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)
eg := errgroup.Group{}
for _, backend := range s.storages {
b := backend
eg.Go(func() error {
stats, err := b.Prune(deadline, s.c.BackupPruningPrefix)
if err != nil {
// doPrune holds general control flow that applies to any kind of storage.
// Callers can pass in a thunk that performs the actual deletion of files.
var doPrune = func(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
if lenMatches != 0 && lenMatches != lenCandidates {
if err := doRemoveFiles(); err != nil {
return err
}
s.stats.Lock()
s.stats.Storages[b.Name()] = StorageStats{
Total: stats.Total,
Pruned: stats.Pruned,
s.logger.Infof(
"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
lenMatches,
lenCandidates,
description,
s.c.BackupRetentionDays,
)
} else if lenMatches != 0 && lenMatches == lenCandidates {
s.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description)
s.logger.Warn("Refusing to do so, please check your configuration.")
} else {
s.logger.Infof("None of %d existing %s were pruned.", lenCandidates, description)
}
return nil
}
if s.minioClient != nil {
candidates := s.minioClient.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{
WithMetadata: true,
Prefix: filepath.Join(s.c.AwsS3Path, s.c.BackupPruningPrefix),
Recursive: true,
})
var matches []minio.ObjectInfo
var lenCandidates int
for candidate := range candidates {
lenCandidates++
if candidate.Err != nil {
return fmt.Errorf(
"pruneBackups: error looking up candidates from remote storage: %w",
candidate.Err,
)
}
if candidate.LastModified.Before(deadline) {
matches = append(matches, candidate)
}
}
s.stats.Storages.S3 = StorageStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
doPrune(len(matches), lenCandidates, "remote backup(s)", func() error {
objectsCh := make(chan minio.ObjectInfo)
go func() {
for _, match := range matches {
objectsCh <- match
}
close(objectsCh)
}()
errChan := s.minioClient.RemoveObjects(context.Background(), s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{})
var removeErrors []error
for result := range errChan {
if result.Err != nil {
removeErrors = append(removeErrors, result.Err)
}
}
if len(removeErrors) != 0 {
return join(removeErrors...)
}
s.stats.Unlock()
return nil
})
}
if err := eg.Wait(); err != nil {
return fmt.Errorf("pruneBackups: error pruning backups: %w", err)
if s.webdavClient != nil {
candidates, err := s.webdavClient.ReadDir(s.c.WebdavPath)
if err != nil {
return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err)
}
var matches []fs.FileInfo
var lenCandidates int
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) {
continue
}
lenCandidates++
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
s.stats.Storages.WebDAV = StorageStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error {
for _, match := range matches {
if err := s.webdavClient.Remove(filepath.Join(s.c.WebdavPath, match.Name())); err != nil {
return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err)
}
}
return nil
})
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
globPattern := path.Join(
s.c.BackupArchive,
fmt.Sprintf("%s*", s.c.BackupPruningPrefix),
)
globMatches, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf(
"pruneBackups: error looking up matching files using pattern %s: %w",
globPattern,
err,
)
}
var candidates []string
for _, candidate := range globMatches {
fi, err := os.Lstat(candidate)
if err != nil {
return fmt.Errorf(
"pruneBackups: error calling Lstat on file %s: %w",
candidate,
err,
)
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
candidates = append(candidates, candidate)
}
}
var matches []string
for _, candidate := range candidates {
fi, err := os.Stat(candidate)
if err != nil {
return fmt.Errorf(
"pruneBackups: error calling stat on file %s: %w",
candidate,
err,
)
}
if fi.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
s.stats.Storages.Local = StorageStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
doPrune(len(matches), len(candidates), "local backup(s)", func() error {
var removeErrors []error
for _, match := range matches {
if err := os.Remove(match); err != nil {
removeErrors = append(removeErrors, err)
}
}
if len(removeErrors) != 0 {
return fmt.Errorf(
"pruneBackups: %d error(s) deleting local files, starting with: %w",
len(removeErrors),
join(removeErrors...),
)
}
return nil
})
}
return nil
}


@@ -5,7 +5,6 @@ package main
import (
"bytes"
"sync"
"time"
)
@@ -31,9 +30,15 @@ type StorageStats struct {
PruneErrors uint
}
// StoragesStats stats about each possible archival location (Local, WebDAV, S3)
type StoragesStats struct {
Local StorageStats
WebDAV StorageStats
S3 StorageStats
}
// Stats global stats regarding script execution
type Stats struct {
sync.Mutex
StartTime time.Time
EndTime time.Time
TookTime time.Duration
@@ -41,5 +46,5 @@ type Stats struct {
LogOutput *bytes.Buffer
Containers ContainersStats
BackupFile BackupFileStats
Storages map[string]StorageStats
Storages StoragesStats
}


@@ -5,13 +5,51 @@ package main
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"strings"
)
var noop = func() error { return nil }
// copyFile creates a copy of the file located at `src` at `dst`.
func copyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
_, err = io.Copy(out, in)
if err != nil {
out.Close()
return err
}
return out.Close()
}
// join takes a list of errors and joins them into a single error
func join(errs ...error) error {
if len(errs) == 1 {
return errs[0]
}
var msgs []string
for _, err := range errs {
if err == nil {
continue
}
msgs = append(msgs, err.Error())
}
return errors.New("[" + strings.Join(msgs, ", ") + "]")
}
// remove removes the given file or directory from disk.
func remove(location string) error {
fi, err := os.Lstat(location)
@@ -46,7 +84,7 @@ type bufferingWriter struct {
func (b *bufferingWriter) Write(p []byte) (n int, err error) {
if n, err := b.buf.Write(p); err != nil {
return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
return n, fmt.Errorf("bufferingWriter: error writing to buffer: %w", err)
}
return b.writer.Write(p)
}


@@ -25,16 +25,15 @@ Here is a list of all data passed to the template:
* `FullPath`: full path of the backup file (e.g. `/archive/backup-2022-02-11T01-00-00.tar.gz`)
* `Size`: size in bytes of the backup file
* `Storages`: object that holds stats about each storage
* `Local`, `S3`, `WebDAV`, `Azure` or `SSH`:
* `Local`, `S3` or `WebDAV`:
* `Total`: total number of backup files
* `Pruned`: number of backup files that were deleted due to the pruning rules
* `PruneErrors`: number of backup files that could not be pruned
## Functions
Some formatting and helper functions are also available:
Some formatting functions are also available:
* `formatTime`: formats a time object using [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339) format (e.g. `2022-02-11T01:00:00Z`)
* `formatBytesBin`: formats an amount of bytes using powers of 1024 (e.g. `7055258` bytes will be `6.7 MiB`)
* `formatBytesDec`: formats an amount of bytes using powers of 1000 (e.g. `7055258` bytes will be `7.1 MB`)
* `env`: returns the value of the environment variable of the given key if set
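To make this concrete, here is a minimal sketch of a custom notification template that combines some of the fields and helpers listed above. The exact nesting of the fields (e.g. under `.Stats`) is an assumption based on this listing; check the full data reference for the precise paths.

{{ define "title_success" -}}
Backup finished at {{ formatTime .Stats.EndTime }}
{{- end }}
{{ define "body_success" -}}
{{/* field paths below assume the values are nested under .Stats as described in the listing above */}}
Archive {{ .Stats.BackupFile.FullPath }} has a size of {{ formatBytesBin .Stats.BackupFile.Size }}.
Local storage holds {{ .Stats.Storages.Local.Total }} backup(s), of which {{ .Stats.Storages.Local.Pruned }} were pruned.
Extra value from the environment: {{ env "EXTRA_VALUE" }}
{{- end }}

The `title_success` and `body_success` block names mirror the ones used in the notification test fixture further down in this diff.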


@@ -13,7 +13,6 @@ if [ ! -d "/etc/dockervolumebackup/conf.d" ]; then
else
echo "/etc/dockervolumebackup/conf.d was found, using configuration files from this directory."
crontab -r && crontab /dev/null
for file in /etc/dockervolumebackup/conf.d/*; do
source $file
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
@@ -23,4 +22,4 @@ else
fi
echo "Starting cron in foreground."
crond -f -d 8
crond -f -l 8
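For reference, each file dropped into /etc/dockervolumebackup/conf.d is sourced as a plain shell snippet and used to schedule its own backup run, with BACKUP_CRON_EXPRESSION falling back to `@daily` when unset. A hypothetical example file (the file name and values are illustrative only):

# /etc/dockervolumebackup/conf.d/daily.env -- hypothetical example
# BACKUP_CRON_EXPRESSION falls back to @daily when it is not set here
BACKUP_CRON_EXPRESSION="@daily"
BACKUP_FILENAME="conf.tar.gz"

The config-file test further down in this diff appears to exercise this mechanism, expecting archives such as conf.tar.gz and other.tar.gz to be written from separate configuration files.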

go.mod

@@ -1,61 +1,60 @@
module github.com/offen/docker-volume-backup
go 1.19
go 1.18
require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1
github.com/containrrr/shoutrrr v0.7.1
github.com/containrrr/shoutrrr v0.5.2
github.com/cosiner/argv v0.1.0
github.com/docker/docker v20.10.24+incompatible
github.com/docker/docker v20.10.11+incompatible
github.com/gofrs/flock v0.8.1
github.com/kelseyhightower/envconfig v1.4.0
github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
github.com/minio/minio-go/v7 v7.0.44
github.com/otiai10/copy v1.10.0
github.com/pkg/sftp v1.13.5
github.com/sirupsen/logrus v1.9.0
github.com/studio-b12/gowebdav v0.0.0-20220128162035-c7b1ff8a5e62
golang.org/x/crypto v0.3.0
golang.org/x/sync v0.1.0
github.com/minio/minio-go/v7 v7.0.16
github.com/otiai10/copy v1.7.0
github.com/sirupsen/logrus v1.8.1
github.com/studio-b12/gowebdav v0.0.0-20211109083228-3f8721cd4b6f
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
)
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/docker/distribution v2.8.0+incompatible // indirect
github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/containerd/containerd v1.5.5 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/fatih/color v1.10.0 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/protobuf v1.5.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.15.12 // indirect
github.com/klauspost/cpuid/v2 v2.2.1 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/klauspost/compress v1.13.6 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/nxadm/tail v1.4.6 // indirect
github.com/onsi/ginkgo v1.14.2 // indirect
github.com/onsi/gomega v1.10.3 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/rs/xid v1.4.0 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/text v0.7.0 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect
github.com/rs/xid v1.3.0 // indirect
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect
google.golang.org/grpc v1.33.2 // indirect
google.golang.org/protobuf v1.26.0 // indirect
gopkg.in/ini.v1 v1.65.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

go.sum

File diff suppressed because it is too large


@@ -1,160 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package azure
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"text/template"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/offen/docker-volume-backup/internal/storage"
)
type azureBlobStorage struct {
*storage.StorageBackend
client *azblob.Client
containerName string
}
// Config contains values that define the configuration of an Azure Blob Storage.
type Config struct {
AccountName string
ContainerName string
PrimaryAccountKey string
Endpoint string
RemotePath string
}
// NewStorageBackend creates and initializes a new Azure Blob Storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
endpointTemplate, err := template.New("endpoint").Parse(opts.Endpoint)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error parsing endpoint template: %w", err)
}
var ep bytes.Buffer
if err := endpointTemplate.Execute(&ep, opts); err != nil {
return nil, fmt.Errorf("NewStorageBackend: error executing endpoint template: %w", err)
}
normalizedEndpoint := fmt.Sprintf("%s/", strings.TrimSuffix(ep.String(), "/"))
var client *azblob.Client
if opts.PrimaryAccountKey != "" {
cred, err := azblob.NewSharedKeyCredential(opts.AccountName, opts.PrimaryAccountKey)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating shared key Azure credential: %w", err)
}
client, err = azblob.NewClientWithSharedKeyCredential(normalizedEndpoint, cred, nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
}
} else {
cred, err := azidentity.NewManagedIdentityCredential(nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating managed identity credential: %w", err)
}
client, err = azblob.NewClient(normalizedEndpoint, cred, nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
}
}
storage := azureBlobStorage{
client: client,
containerName: opts.ContainerName,
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
}
return &storage, nil
}
// Name returns the name of the storage backend
func (b *azureBlobStorage) Name() string {
return "Azure"
}
// Copy copies the given file to the storage backend.
func (b *azureBlobStorage) Copy(file string) error {
fileReader, err := os.Open(file)
if err != nil {
return fmt.Errorf("(*azureBlobStorage).Copy: error opening file %s: %w", file, err)
}
_, err = b.client.UploadStream(
context.Background(),
b.containerName,
filepath.Join(b.DestinationPath, filepath.Base(file)),
fileReader,
nil,
)
if err != nil {
return fmt.Errorf("(*azureBlobStorage).Copy: error uploading file %s: %w", file, err)
}
return nil
}
// Prune rotates away backups according to the configuration and provided
// deadline for the Azure Blob storage backend.
func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
lookupPrefix := filepath.Join(b.DestinationPath, pruningPrefix)
pager := b.client.NewListBlobsFlatPager(b.containerName, &container.ListBlobsFlatOptions{
Prefix: &lookupPrefix,
})
var matches []string
var totalCount uint
for pager.More() {
resp, err := pager.NextPage(context.Background())
if err != nil {
return nil, fmt.Errorf("(*azureBlobStorage).Prune: error paging over blobs: %w", err)
}
for _, v := range resp.Segment.BlobItems {
totalCount++
if v.Properties.LastModified.Before(deadline) {
matches = append(matches, *v.Name)
}
}
}
stats := storage.PruneStats{
Total: totalCount,
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), int(totalCount), "Azure Blob Storage backup(s)", func() error {
wg := sync.WaitGroup{}
wg.Add(len(matches))
var errs []error
for _, match := range matches {
name := match
go func() {
_, err := b.client.DeleteBlob(context.Background(), b.containerName, name, nil)
if err != nil {
errs = append(errs, err)
}
wg.Done()
}()
}
wg.Wait()
if len(errs) != 0 {
return errors.Join(errs...)
}
return nil
}); err != nil {
return &stats, err
}
return &stats, nil
}


@@ -1,160 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package local
import (
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
)
type localStorage struct {
*storage.StorageBackend
latestSymlink string
}
// Config allows configuration of a local storage backend.
type Config struct {
ArchivePath string
LatestSymlink string
}
// NewStorageBackend creates and initializes a new local storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) storage.Backend {
return &localStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.ArchivePath,
Log: logFunc,
},
latestSymlink: opts.LatestSymlink,
}
}
// Name returns the name of the storage backend
func (b *localStorage) Name() string {
return "Local"
}
// Copy copies the given file to the local storage backend.
func (b *localStorage) Copy(file string) error {
_, name := path.Split(file)
if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
return fmt.Errorf("(*localStorage).Copy: Error copying file to local archive: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
if b.latestSymlink != "" {
symlink := path.Join(b.DestinationPath, b.latestSymlink)
if _, err := os.Lstat(symlink); err == nil {
os.Remove(symlink)
}
if err := os.Symlink(name, symlink); err != nil {
return fmt.Errorf("(*localStorage).Copy: error creating latest symlink: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
}
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the local storage backend.
func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
globPattern := path.Join(
b.DestinationPath,
fmt.Sprintf("%s*", pruningPrefix),
)
globMatches, err := filepath.Glob(globPattern)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error looking up matching files using pattern %s: %w",
globPattern,
err,
)
}
var candidates []string
for _, candidate := range globMatches {
fi, err := os.Lstat(candidate)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error calling Lstat on file %s: %w",
candidate,
err,
)
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
candidates = append(candidates, candidate)
}
}
var matches []string
for _, candidate := range candidates {
fi, err := os.Stat(candidate)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error calling stat on file %s: %w",
candidate,
err,
)
}
if fi.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error {
var removeErrors []error
for _, match := range matches {
if err := os.Remove(match); err != nil {
removeErrors = append(removeErrors, err)
}
}
if len(removeErrors) != 0 {
return fmt.Errorf(
"(*localStorage).Prune: %d error(s) deleting local files, starting with: %w",
len(removeErrors),
errors.Join(removeErrors...),
)
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}
// copyFile creates a copy of the file located at `src` at `dst`.
func copyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
_, err = io.Copy(out, in)
if err != nil {
out.Close()
return err
}
return out.Close()
}


@@ -1,169 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package s3
import (
"context"
"crypto/x509"
"errors"
"fmt"
"path"
"path/filepath"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/offen/docker-volume-backup/internal/storage"
)
type s3Storage struct {
*storage.StorageBackend
client *minio.Client
bucket string
storageClass string
}
// Config contains values that define the configuration of a S3 backend.
type Config struct {
Endpoint string
AccessKeyID string
SecretAccessKey string
IamRoleEndpoint string
EndpointProto string
EndpointInsecure bool
RemotePath string
BucketName string
StorageClass string
CACert *x509.Certificate
}
// NewStorageBackend creates and initializes a new S3/Minio storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
var creds *credentials.Credentials
if opts.AccessKeyID != "" && opts.SecretAccessKey != "" {
creds = credentials.NewStaticV4(
opts.AccessKeyID,
opts.SecretAccessKey,
"",
)
} else if opts.IamRoleEndpoint != "" {
creds = credentials.NewIAM(opts.IamRoleEndpoint)
} else {
return nil, errors.New("NewStorageBackend: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
}
options := minio.Options{
Creds: creds,
Secure: opts.EndpointProto == "https",
}
transport, err := minio.DefaultTransport(true)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: failed to create default minio transport: %w", err)
}
if opts.EndpointInsecure {
if !options.Secure {
return nil, errors.New("NewStorageBackend: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
}
transport.TLSClientConfig.InsecureSkipVerify = true
} else if opts.CACert != nil {
if transport.TLSClientConfig.RootCAs == nil {
transport.TLSClientConfig.RootCAs = x509.NewCertPool()
}
transport.TLSClientConfig.RootCAs.AddCert(opts.CACert)
}
options.Transport = transport
mc, err := minio.New(opts.Endpoint, &options)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error setting up minio client: %w", err)
}
return &s3Storage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: mc,
bucket: opts.BucketName,
storageClass: opts.StorageClass,
}, nil
}
// Name returns the name of the storage backend
func (v *s3Storage) Name() string {
return "S3"
}
// Copy copies the given file to the S3/Minio storage backend.
func (b *s3Storage) Copy(file string) error {
_, name := path.Split(file)
if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, minio.PutObjectOptions{
ContentType: "application/tar+gzip",
StorageClass: b.storageClass,
}); err != nil {
if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
}
return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend.
func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates := b.client.ListObjects(context.Background(), b.bucket, minio.ListObjectsOptions{
Prefix: filepath.Join(b.DestinationPath, pruningPrefix),
Recursive: true,
})
var matches []minio.ObjectInfo
var lenCandidates int
for candidate := range candidates {
lenCandidates++
if candidate.Err != nil {
return nil, fmt.Errorf(
"(*s3Storage).Prune: Error looking up candidates from remote storage! %w",
candidate.Err,
)
}
if candidate.LastModified.Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error {
objectsCh := make(chan minio.ObjectInfo)
go func() {
for _, match := range matches {
objectsCh <- match
}
close(objectsCh)
}()
errChan := b.client.RemoveObjects(context.Background(), b.bucket, objectsCh, minio.RemoveObjectsOptions{})
var removeErrors []error
for result := range errChan {
if result.Err != nil {
removeErrors = append(removeErrors, result.Err)
}
}
if len(removeErrors) != 0 {
return errors.Join(removeErrors...)
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}


@@ -1,190 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package ssh
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
type sshStorage struct {
*storage.StorageBackend
client *ssh.Client
sftpClient *sftp.Client
hostName string
}
// Config allows configuring an SSH backend.
type Config struct {
HostName string
Port string
User string
Password string
IdentityFile string
IdentityPassphrase string
RemotePath string
}
// NewStorageBackend creates and initializes a new SSH storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
var authMethods []ssh.AuthMethod
if opts.Password != "" {
authMethods = append(authMethods, ssh.Password(opts.Password))
}
if _, err := os.Stat(opts.IdentityFile); err == nil {
key, err := ioutil.ReadFile(opts.IdentityFile)
if err != nil {
return nil, errors.New("NewStorageBackend: error reading the private key")
}
var signer ssh.Signer
if opts.IdentityPassphrase != "" {
signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(opts.IdentityPassphrase))
if err != nil {
return nil, errors.New("NewStorageBackend: error parsing the encrypted private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
} else {
signer, err = ssh.ParsePrivateKey(key)
if err != nil {
return nil, errors.New("NewStorageBackend: error parsing the private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
}
}
sshClientConfig := &ssh.ClientConfig{
User: opts.User,
Auth: authMethods,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: Error creating ssh client: %w", err)
}
_, _, err = sshClient.SendRequest("keepalive", false, nil)
if err != nil {
return nil, err
}
sftpClient, err := sftp.NewClient(sshClient)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating sftp client: %w", err)
}
return &sshStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: sshClient,
sftpClient: sftpClient,
hostName: opts.HostName,
}, nil
}
// Name returns the name of the storage backend
func (b *sshStorage) Name() string {
return "SSH"
}
// Copy copies the given file to the SSH storage backend.
func (b *sshStorage) Copy(file string) error {
source, err := os.Open(file)
_, name := path.Split(file)
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error reading the file to be uploaded: %w", err)
}
defer source.Close()
destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error creating file on SSH storage: %w", err)
}
defer destination.Close()
chunk := make([]byte, 1000000)
for {
num, err := source.Read(chunk)
if err == io.EOF {
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
if tot != len(chunk[:num]) {
return errors.New("(*sshStorage).Copy: failed to write stream")
}
break
}
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
if tot != len(chunk[:num]) {
return fmt.Errorf("(*sshStorage).Copy: failed to write stream")
}
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the SSH storage backend.
func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
if err != nil {
return nil, fmt.Errorf("(*sshStorage).Prune: Error reading directory from SSH storage: %w", err)
}
var matches []string
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
continue
}
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate.Name())
}
}
stats := &storage.PruneStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error {
for _, match := range matches {
if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
return fmt.Errorf("(*sshStorage).Prune: Error removing file from SSH storage: %w", err)
}
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}


@@ -1,61 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package storage
import (
"time"
)
// Backend is an interface for defining functions which all storage providers support.
type Backend interface {
Copy(file string) error
Prune(deadline time.Time, pruningPrefix string) (*PruneStats, error)
Name() string
}
// StorageBackend is a generic type of storage. The fields here are common properties of all storage types.
type StorageBackend struct {
DestinationPath string
RetentionDays int
Log Log
}
type LogLevel int
const (
LogLevelInfo LogLevel = iota
LogLevelWarning
LogLevelError
)
type Log func(logType LogLevel, context string, msg string, params ...any)
// PruneStats is a wrapper struct for returning stats after pruning
type PruneStats struct {
Total uint
Pruned uint
}
// DoPrune holds general control flow that applies to any kind of storage.
// Callers can pass in a thunk that performs the actual deletion of files.
func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
if lenMatches != 0 && lenMatches != lenCandidates {
if err := doRemoveFiles(); err != nil {
return err
}
b.Log(LogLevelInfo, context,
"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
lenMatches,
lenCandidates,
description,
b.RetentionDays,
)
} else if lenMatches != 0 && lenMatches == lenCandidates {
b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing %s.", lenMatches, description)
b.Log(LogLevelWarning, context, "Refusing to do so, please check your configuration.")
} else {
b.Log(LogLevelInfo, context, "None of %d existing %s were pruned.", lenCandidates, description)
}
return nil
}


@@ -1,123 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package webdav
import (
"errors"
"fmt"
"io/fs"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/studio-b12/gowebdav"
)
type webDavStorage struct {
*storage.StorageBackend
client *gowebdav.Client
url string
}
// Config allows configuring a WebDAV storage backend.
type Config struct {
URL string
RemotePath string
Username string
Password string
URLInsecure bool
}
// NewStorageBackend creates and initializes a new WebDav storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
if opts.Username == "" || opts.Password == "" {
return nil, errors.New("NewStorageBackend: WEBDAV_URL is defined, but no credentials were provided")
} else {
webdavClient := gowebdav.NewClient(opts.URL, opts.Username, opts.Password)
if opts.URLInsecure {
defaultTransport, ok := http.DefaultTransport.(*http.Transport)
if !ok {
return nil, errors.New("NewStorageBackend: unexpected error when asserting type for http.DefaultTransport")
}
webdavTransport := defaultTransport.Clone()
webdavTransport.TLSClientConfig.InsecureSkipVerify = opts.URLInsecure
webdavClient.SetTransport(webdavTransport)
}
return &webDavStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: webdavClient,
}, nil
}
}
// Name returns the name of the storage backend
func (b *webDavStorage) Name() string {
return "WebDAV"
}
// Copy copies the given file to the WebDav storage backend.
func (b *webDavStorage) Copy(file string) error {
_, name := path.Split(file)
if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error creating directory '%s' on WebDAV server: %w", b.DestinationPath, err)
}
r, err := os.Open(file)
if err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error opening the file to be uploaded: %w", err)
}
if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error uploading the file to WebDAV server: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to WebDAV URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the WebDav storage backend.
func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.client.ReadDir(b.DestinationPath)
if err != nil {
return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
}
var matches []fs.FileInfo
var lenCandidates int
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
continue
}
lenCandidates++
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error {
for _, match := range matches {
if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
return fmt.Errorf("(*webDavStorage).Prune: Error removing file from WebDAV storage: %w", err)
}
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}


@@ -1,58 +0,0 @@
version: '3'
services:
storage:
image: mcr.microsoft.com/azure-storage/azurite
volumes:
- azurite_backup_data:/data
command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data
healthcheck:
test: nc 127.0.0.1 10000 -z
interval: 1s
retries: 30
az_cli:
image: mcr.microsoft.com/azure-cli
volumes:
- ./local:/dump
command:
- /bin/sh
- -c
- |
az storage container create --name test-container
depends_on:
storage:
condition: service_healthy
environment:
AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://storage:10000/devstoreaccount1;
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
restart: always
environment:
AZURE_STORAGE_ACCOUNT_NAME: devstoreaccount1
AZURE_STORAGE_PRIMARY_ACCOUNT_KEY: Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
AZURE_STORAGE_CONTAINER_NAME: test-container
AZURE_STORAGE_ENDPOINT: http://storage:10000/{{ .AccountName }}/
AZURE_STORAGE_PATH: 'path/to/backup'
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
azurite_backup_data:
name: azurite_backup_data
app_data:


@@ -1,40 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker compose run --rm az_cli \
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
pass "Found relevant files in untared remote backups."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker compose run --rm az_cli \
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
test -f ./local/test.tar.gz
pass "Remote backups have not been deleted."
docker compose down --volumes


@@ -1,48 +0,0 @@
version: '3'
services:
minio:
hostname: minio.local
image: minio/minio:RELEASE.2020-08-04T23-10-51Z
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: test
MINIO_ACCESS_KEY: test
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server --certs-dir "/certs" --address ":443" /data'
volumes:
- minio_backup_data:/data
- ./minio.crt:/certs/public.crt
- ./minio.key:/certs/private.key
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
depends_on:
- minio
restart: always
environment:
BACKUP_FILENAME: test.tar.gz
AWS_ACCESS_KEY_ID: test
AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
AWS_ENDPOINT: minio.local:443
AWS_ENDPOINT_CA_CERT: /root/minio-rootCA.crt
AWS_S3_BUCKET_NAME: backup
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ./rootCA.crt:/root/minio-rootCA.crt
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
minio_backup_data:
name: minio_backup_data
app_data:


@@ -1,43 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
openssl genrsa -des3 -passout pass:test -out rootCA.key 4096
openssl req -passin pass:test \
-subj "/C=DE/ST=BE/O=IntegrationTest, Inc." \
-x509 -new -key rootCA.key -sha256 -days 1 -out rootCA.crt
openssl genrsa -out minio.key 4096
openssl req -new -sha256 -key minio.key \
-subj "/C=DE/ST=BE/O=IntegrationTest, Inc./CN=minio" \
-out minio.csr
openssl x509 -req -passin pass:test \
-in minio.csr \
-CA rootCA.crt -CAkey rootCA.key -CAcreateserial \
-extfile san.cnf \
-out minio.crt -days 1 -sha256
openssl x509 -in minio.crt -noout -text
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'tar -xvf /minio_data/backup/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backups."
docker compose down --volumes


@@ -1 +0,0 @@
subjectAltName = DNS:minio.local


@@ -3,8 +3,6 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker network create test_network
docker volume create backup_data
@@ -48,15 +46,21 @@ docker run --rm \
--entrypoint backup \
offen/docker-volume-backup:${TEST_VERSION:-canary}
docker run --rm \
docker run --rm -it \
-v backup_data:/data alpine \
ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'
pass "Found relevant files in untared remote backup."
echo "[TEST:PASS] Found relevant files in untared remote backup."
# This test does not stop containers during backup. This is intentional
# in order to cover this setup as well.
expect_running_containers "2"
if [ "$(docker ps -q | wc -l)" != "2" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker ps
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
docker rm $(docker stop minio offen)
docker volume rm backup_data app_data


@@ -10,27 +10,12 @@ services:
MARIADB_ROOT_PASSWORD: test
MARIADB_DATABASE: backup
labels:
# this is testing the deprecated label on purpose
- docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump -ptest --all-databases > /tmp/volume/dump.sql'
- docker-volume-backup.copy-post=/bin/sh -c 'echo "post" > /tmp/volume/post.txt'
- docker-volume-backup.exec-post=/bin/sh -c 'echo "post" > /tmp/volume/post.txt'
- docker-volume-backup.exec-label=test
volumes:
- app_data:/tmp/volume
other_database:
image: mariadb:10.7
deploy:
restart_policy:
condition: on-failure
environment:
MARIADB_ROOT_PASSWORD: test
MARIADB_DATABASE: backup
labels:
- docker-volume-backup.archive-pre=touch /tmp/volume/not-relevant.txt
- docker-volume-backup.exec-label=not-relevant
volumes:
- app_data:/tmp/volume
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
deploy:
@@ -42,9 +27,10 @@ services:
EXEC_LABEL: test
EXEC_FORWARD_OUTPUT: "true"
volumes:
- ./local:/archive
- archive:/archive
- app_data:/backup/data:ro
- /var/run/docker.sock:/var/run/docker.sock
volumes:
app_data:
archive:


@@ -3,45 +3,39 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p ./local
docker compose up -d
docker-compose up -d
sleep 30 # mariadb likes to take a bit before responding
docker compose exec backup backup
docker-compose exec backup backup
sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' commands_archive) ./local
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/dump.sql ]; then
fail "Could not find file written by pre command."
echo "[TEST:FAIL] Could not find file written by pre command."
exit 1
fi
pass "Found expected file."
if [ -f ./backup/data/not-relevant.txt ]; then
fail "Command ran for container with other label."
fi
pass "Command did not run for container with other label."
echo "[TEST:PASS] Found expected file."
if [ -f ./backup/data/post.txt ]; then
fail "File created in post command was present in backup."
echo "[TEST:FAIL] File created in post command was present in backup."
exit 1
fi
pass "Did not find unexpected file."
echo "[TEST:PASS] Did not find unexpected file."
docker compose down --volumes
docker-compose down --volumes
sudo rm -rf ./local
info "Running commands test in swarm mode next."
echo "[TEST:INFO] Running commands test in swarm mode next."
mkdir -p ./local
docker swarm init
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z $(docker ps -q -f name=backup) ]; do
info "Backup container not ready yet. Retrying."
echo "[TEST:INFO] Backup container not ready yet. Retrying."
sleep 1
done
@@ -49,16 +43,20 @@ sleep 20
docker exec $(docker ps -q -f name=backup) backup
sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' test_stack_archive) ./local
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/dump.sql ]; then
fail "Could not find file written by pre command."
echo "[TEST:FAIL] Could not find file written by pre command."
exit 1
fi
pass "Found expected file."
echo "[TEST:PASS] Found expected file."
if [ -f ./backup/data/post.txt ]; then
fail "File created in post command was present in backup."
echo "[TEST:FAIL] File created in post command was present in backup."
exit 1
fi
pass "Did not find unexpected file."
echo "[TEST:PASS] Did not find unexpected file."
docker stack rm test_stack
docker swarm leave --force


@@ -12,11 +12,21 @@ services:
volumes:
- minio_backup_data:/data
webdav:
image: bytemark/webdav:2.4
environment:
AUTH_TYPE: Digest
USERNAME: test
PASSWORD: test
volumes:
- webdav_backup_data:/var/lib/dav
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
depends_on:
- minio
- webdav
restart: always
environment:
AWS_ACCESS_KEY_ID: test
@@ -26,11 +36,18 @@ services:
AWS_S3_BUCKET_NAME: backup
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
GPG_PASSPHRASE: 1234secret
WEBDAV_URL: http://webdav/
WEBDAV_PATH: /my/new/path/
WEBDAV_USERNAME: test
WEBDAV_PASSWORD: test
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
@@ -43,5 +60,5 @@ services:
volumes:
minio_backup_data:
name: minio_backup_data
webdav_backup_data:
app_data:

test/compose/run.sh (new executable file)

@@ -0,0 +1,68 @@
#!/bin/sh
set -e
cd $(dirname $0)
mkdir -p local
docker-compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker-compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
docker-compose exec backup backup
sleep 5
if [ "$(docker-compose ps -q | wc -l)" != "4" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker-compose ps
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
docker run --rm -it \
-v compose_minio_backup_data:/minio_data \
-v compose_webdav_backup_data:/webdav_data alpine \
ash -c 'apk add gnupg && \
echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /minio_data/backup/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db && \
echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /webdav_data/data/my/new/path/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
echo "[TEST:PASS] Found relevant files in decrypted and untared remote backups."
echo 1234secret | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test-hostnametoken.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
rm ./local/decrypted.tar.gz
test -L /tmp/backup/app_data/db.link
echo "[TEST:PASS] Found relevant files in decrypted and untared local backup."
test -L ./local/test-hostnametoken.latest.tar.gz.gpg
echo "[TEST:PASS] Found symlink to latest version in local backup."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5
docker-compose exec backup backup
docker run --rm -it \
-v compose_minio_backup_data:/minio_data \
-v compose_webdav_backup_data:/webdav_data alpine \
ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ] && \
[ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'
echo "[TEST:PASS] Remote backups have not been deleted."
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
echo "[TEST:FAIL] Backups should not have been deleted, instead seen:"
find ./local -type f
exit 1
fi
echo "[TEST:PASS] Local backups have not been deleted."
docker-compose down --volumes


@@ -3,29 +3,30 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
docker-compose up -d
# sleep until a backup is guaranteed to have happened on the 1 minute schedule
sleep 100
docker compose down --volumes
docker-compose down --volumes
if [ ! -f ./local/conf.tar.gz ]; then
fail "Config from file was not used."
echo "[TEST:FAIL] Config from file was not used."
exit 1
fi
pass "Config from file was used."
echo "[TEST:PASS] Config from file was used."
if [ ! -f ./local/other.tar.gz ]; then
fail "Run on same schedule did not succeed."
echo "[TEST:FAIL] Run on same schedule did not succeed."
exit 1
fi
pass "Run on same schedule succeeded."
echo "[TEST:PASS] Run on same schedule succeeded."
if [ -f ./local/never.tar.gz ]; then
fail "Unexpected file was found."
echo "[TEST:FAIL] Unexpected file was found."
exit 1
fi
pass "Unexpected cron did not run."
echo "[TEST:PASS] Unexpected cron did not run."


@@ -1,4 +0,0 @@
ARG version=canary
FROM offen/docker-volume-backup:$version
RUN apk add rsync


@@ -1,26 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
labels:
- docker-volume-backup.copy-post=/bin/sh -c 'mkdir -p /tmp/unpack && tar -xvf $$COMMAND_RUNTIME_ARCHIVE_FILEPATH -C /tmp/unpack && rsync -r /tmp/unpack/backup/app_data /local'
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
EXEC_FORWARD_OUTPUT: "true"
volumes:
- ./local:/local
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:


@@ -1,29 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
export BASE_VERSION="${TEST_VERSION:-canary}"
export TEST_VERSION="${TEST_VERSION:-canary}-with-rsync"
docker build . -t offen/docker-volume-backup:$TEST_VERSION --build-arg version=$BASE_VERSION
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "2"
if [ ! -f "./local/app_data/offen.db" ]; then
fail "Could not find expected file in untared archive."
fi
docker compose down --volumes


@@ -1,26 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
environment:
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_FILENAME: test.tar.gz
BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
GPG_PASSPHRASE: 1234#$$ecret
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:


@@ -1,33 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
docker compose exec backup backup
expect_running_containers "2"
tmp_dir=$(mktemp -d)
echo "1234#\$ecret" | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C $tmp_dir
if [ ! -f $tmp_dir/backup/app_data/offen.db ]; then
fail "Could not find expected file in untared archive."
fi
rm ./local/decrypted.tar.gz
pass "Found relevant files in decrypted and untared local backup."
if [ ! -L ./local/test-latest.tar.gz.gpg ]; then
fail "Could not find local symlink to latest encrypted backup."
fi
docker compose down --volumes


@@ -1 +0,0 @@
local


@@ -1,15 +0,0 @@
version: '3.8'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
deploy:
restart_policy:
condition: on-failure
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_EXCLUDE_REGEXP: '\.(me|you)$$'
volumes:
- ./local:/archive
- ./sources:/backup/data:ro


@@ -1,28 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
docker compose exec backup backup
docker compose down --volumes
out=$(mktemp -d)
sudo tar --same-owner -xvf ./local/test.tar.gz -C "$out"
if [ ! -f "$out/backup/data/me.txt" ]; then
fail "Expected file was not found."
fi
pass "Expected file was found."
if [ -f "$out/backup/data/skip.me" ]; then
fail "Ignored file was found."
fi
pass "Ignored file was not found."

View File

@@ -1 +0,0 @@
local

View File

@@ -1,29 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
restart: always
environment:
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ./local:/archive
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:
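
Regarding the filename: Compose turns "$$HOSTNAME" into the literal "$HOSTNAME", and because BACKUP_FILENAME_EXPAND is set, the container expands that against its own hostname (hostnametoken above) when the backup runs. A rough stand-in for that expansion, using envsubst purely for illustration:

export HOSTNAME=hostnametoken
echo 'test-$HOSTNAME.tar.gz' | envsubst   # -> test-hostnametoken.tar.gz, the file the test script expects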

View File

@@ -1,55 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
docker compose exec backup backup
sleep 5
expect_running_containers "2"
tmp_dir=$(mktemp -d)
tar -xvf ./local/test-hostnametoken.tar.gz -C $tmp_dir
if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
fail "Could not find expected file in untared archive."
fi
rm -f ./local/test-hostnametoken.tar.gz
if [ ! -L "$tmp_dir/backup/app_data/db.link" ]; then
fail "Could not find expected symlink in untared archive."
fi
pass "Found relevant files in decrypted and untared local backup."
if [ ! -L ./local/test-hostnametoken.latest.tar.gz.gpg ]; then
fail "Could not find symlink to latest version."
fi
pass "Found symlink to latest version in local backup."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
fail "Backups should not have been deleted, instead seen: "$(find ./local -type f)""
fi
pass "Local backups have not been deleted."
docker compose down --volumes

View File

@@ -10,7 +10,6 @@ services:
BACKUP_PRUNING_PREFIX: test
NOTIFICATION_LEVEL: info
NOTIFICATION_URLS: ${NOTIFICATION_URLS}
EXTRA_VALUE: extra-value
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro

View File

@@ -1,5 +1,5 @@
{{ define "title_success" -}}
Successful test run with {{ env "EXTRA_VALUE" }}, yay!
Successful test run, yay!
{{- end }}
{{ define "body_success" -}}

View File

@@ -3,48 +3,50 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
docker-compose up -d
sleep 5
GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')
info "Set up Gotify application using token $GOTIFY_TOKEN"
echo "[TEST:INFO] Set up Gotify application using token $GOTIFY_TOKEN"
docker compose exec backup backup
docker-compose exec backup backup
NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 0 ]; then
fail "Expected no notifications to be sent when not configured"
echo "[TEST:FAIL] Expected no notifications to be sent when not configured"
exit 1
fi
pass "No notifications were sent when not configured."
echo "[TEST:PASS] No notifications were sent when not configured."
docker compose down
docker-compose down
NOTIFICATION_URLS="gotify://gotify/${GOTIFY_TOKEN}?disableTLS=true" docker compose up -d
NOTIFICATION_URLS="gotify://gotify/${GOTIFY_TOKEN}?disableTLS=true" docker-compose up -d
docker compose exec backup backup
docker-compose exec backup backup
NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 1 ]; then
fail "Expected one notifications to be sent when configured"
echo "[TEST:FAIL] Expected one notifications to be sent when configured"
exit 1
fi
pass "Correct number of notifications were sent when configured."
echo "[TEST:PASS] Correct number of notifications were sent when configured."
MESSAGE_TITLE=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].title')
MESSAGE_BODY=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].message')
if [ "$MESSAGE_TITLE" != "Successful test run with extra-value, yay!" ]; then
fail "Unexpected notification title $MESSAGE_TITLE"
if [ "$MESSAGE_TITLE" != "Successful test run, yay!" ]; then
echo "[TEST:FAIL] Unexpected notification title $MESSAGE_TITLE"
exit 1
fi
pass "Custom notification title was used."
echo "[TEST:PASS] Custom notification title was used."
if [ "$MESSAGE_BODY" != "Backing up /tmp/test.tar.gz succeeded." ]; then
fail "Unexpected notification body $MESSAGE_BODY"
echo "[TEST:FAIL] Unexpected notification body $MESSAGE_BODY"
exit 1
fi
pass "Custom notification body was used."
echo "[TEST:PASS] Custom notification body was used."
docker compose down --volumes
docker-compose down --volumes

View File

@@ -4,27 +4,25 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
docker-compose up -d
sleep 5
docker compose exec backup backup
docker-compose exec backup backup
tmp_dir=$(mktemp -d)
sudo tar --same-owner -xvf ./local/backup.tar.gz -C $tmp_dir
sudo tar --same-owner -xvf ./local/backup.tar.gz -C /tmp
sudo find $tmp_dir/backup/postgres > /dev/null
pass "Backup contains files at expected location"
sudo find /tmp/backup/postgres > /dev/null
echo "[TEST:PASS] Backup contains files at expected location"
for file in $(sudo find $tmp_dir/backup/postgres); do
for file in $(sudo find /tmp/backup/postgres); do
if [ "$(sudo stat -c '%u:%g' $file)" != "70:70" ]; then
fail "Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
echo "[TEST:FAIL] Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
exit 1
fi
done
pass "All files and directories in backup preserved their ownership."
echo "[TEST:PASS] All files and directories in backup preserved their ownership."
docker compose down --volumes
docker-compose down --volumes

View File

@@ -1,42 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'tar -xvf /minio_data/backup/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backups."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes

View File

@@ -1,78 +0,0 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense
version: '3.8'
services:
minio:
image: minio/minio:RELEASE.2020-08-04T23-10-51Z
deploy:
restart_policy:
condition: on-failure
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: test
MINIO_ACCESS_KEY: test
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
volumes:
- backup_data:/data
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
depends_on:
- minio
deploy:
restart_policy:
condition: on-failure
environment:
AWS_ACCESS_KEY_ID_FILE: /run/secrets/minio_root_user
AWS_SECRET_ACCESS_KEY_FILE: /run/secrets/minio_root_password
AWS_ENDPOINT: minio:9000
AWS_ENDPOINT_PROTO: http
AWS_S3_BUCKET_NAME: backup
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: 7
BACKUP_PRUNING_LEEWAY: 5s
volumes:
- pg_data:/backup/pg_data:ro
- /var/run/docker.sock:/var/run/docker.sock
secrets:
- minio_root_user
- minio_root_password
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
healthcheck:
disable: true
deploy:
replicas: 2
restart_policy:
condition: on-failure
pg:
image: postgres:14-alpine
environment:
POSTGRES_PASSWORD: example
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- pg_data:/var/lib/postgresql/data
deploy:
restart_policy:
condition: on-failure
volumes:
backup_data:
name: backup_data
pg_data:
name: pg_data
secrets:
minio_root_user:
external: true
minio_root_password:
external: true
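
The *_FILE variables follow the usual Docker secrets pattern: rather than receiving credentials directly, the backup container reads them from the files Swarm mounts under /run/secrets (the two secrets are created by the test script before the stack is deployed). Conceptually, and only as a sketch, the lookup amounts to:

AWS_ACCESS_KEY_ID="$(cat /run/secrets/minio_root_user)"
AWS_SECRET_ACCESS_KEY="$(cat /run/secrets/minio_root_password)"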

View File

@@ -1,44 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker swarm init
printf "test" | docker secret create minio_root_user -
printf "GMusLtUmILge2by+z890kQ" | docker secret create minio_root_password -
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z $(docker ps -q -f name=backup) ]; do
info "Backup container not ready yet. Retrying."
sleep 1
done
sleep 20
docker exec $(docker ps -q -f name=backup) backup
docker run --rm \
-v backup_data:/data alpine \
ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
pass "Found relevant files in untared backup."
sleep 5
expect_running_containers "5"
docker stack rm test_stack
docker secret rm minio_root_password
docker secret rm minio_root_user
docker swarm leave --force
sleep 10
docker volume rm backup_data
docker volume rm pg_data

View File

@@ -1,47 +0,0 @@
version: '3'
services:
ssh:
image: linuxserver/openssh-server:version-8.6_p1-r3
environment:
- PUID=1000
- PGID=1000
- USER_NAME=test
volumes:
- ./id_rsa.pub:/config/.ssh/authorized_keys
- ssh_backup_data:/tmp
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
depends_on:
- ssh
restart: always
environment:
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
SSH_HOST_NAME: ssh
SSH_PORT: 2222
SSH_USER: test
SSH_REMOTE_PATH: /tmp
SSH_IDENTITY_PASSPHRASE: test1234
volumes:
- ./id_rsa:/root/.ssh/id_rsa
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
ssh_backup_data:
name: ssh_backup_data
app_data:
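
The key material referenced above is generated by the test script and wired up as follows (sketch; the passphrase matches SSH_IDENTITY_PASSPHRASE):

ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
# id_rsa     -> mounted into the backup container as /root/.ssh/id_rsa
# id_rsa.pub -> mounted into the openssh-server container as /config/.ssh/authorized_keys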

View File

@@ -1,43 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers 3
docker run --rm \
-v ssh_backup_data:/ssh_data \
alpine \
ash -c 'tar -xvf /ssh_data/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in decrypted and untared remote backups."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm \
-v ssh_backup_data:/ssh_data \
alpine \
ash -c '[ $(find /ssh_data/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes
rm -f id_rsa id_rsa.pub

View File

@@ -64,6 +64,4 @@ services:
volumes:
backup_data:
name: backup_data
pg_data:
name: pg_data

View File

@@ -3,15 +3,13 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker swarm init
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z $(docker ps -q -f name=backup) ]; do
info "Backup container not ready yet. Retrying."
echo "[TEST:INFO] Backup container not ready yet. Retrying."
sleep 1
done
@@ -19,19 +17,19 @@ sleep 20
docker exec $(docker ps -q -f name=backup) backup
docker run --rm \
-v backup_data:/data alpine \
docker run --rm -it \
-v test_stack_backup_data:/data alpine \
ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
pass "Found relevant files in untared backup."
echo "[TEST:PASS] Found relevant files in untared backup."
sleep 5
expect_running_containers "5"
if [ "$(docker ps -q | wc -l)" != "5" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker ps -a
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
docker stack rm test_stack
docker swarm leave --force
sleep 10
docker volume rm backup_data
docker volume rm pg_data

View File

@@ -1,2 +0,0 @@
local
backup

View File

@@ -1,30 +0,0 @@
version: '2.4'
services:
alpine:
image: alpine:3.17.3
tty: true
volumes:
- app_data:/tmp
labels:
- docker-volume-backup.archive-pre.user=testuser
- docker-volume-backup.archive-pre=/bin/sh -c 'whoami > /tmp/whoami.txt'
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
deploy:
restart_policy:
condition: on-failure
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
EXEC_FORWARD_OUTPUT: "true"
volumes:
- ./local:/archive
- app_data:/backup/data:ro
- /var/run/docker.sock:/var/run/docker.sock
volumes:
app_data:
archive:
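
Taken together, the two labels on the alpine service ask for the pre command to be exec'd in that container as testuser before the archive is created. This is roughly equivalent to the following manual invocation (illustrative only; the container name is the one the test script below works against):

docker exec --user testuser user-alpine-1 /bin/sh -c 'whoami > /tmp/whoami.txt'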

View File

@@ -1,30 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
user_name=testuser
docker exec user-alpine-1 adduser --disabled-password "$user_name"
docker compose exec backup backup
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/whoami.txt ]; then
fail "Could not find file written by pre command."
fi
pass "Found expected file."
tar -xvf ./local/test.tar.gz
if [ "$(cat ./backup/data/whoami.txt)" != "$user_name" ]; then
fail "Could not find expected user name."
fi
pass "Found expected user."
docker compose down --volumes
sudo rm -rf ./local

View File

@@ -1,23 +0,0 @@
#!/bin/sh
set -e
info () {
echo "[test:${current_test:-none}:info] "$1""
}
pass () {
echo "[test:${current_test:-none}:pass] "$1""
}
fail () {
echo "[test:${current_test:-none}:fail] "$1""
exit 1
}
expect_running_containers () {
if [ "$(docker ps -q | wc -l)" != "$1" ]; then
fail "Expected $1 containers to be running, instead seen: "$(docker ps -a | wc -l)""
fi
pass "$1 containers running."
}
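
These helpers are sourced by the individual test scripts elsewhere in this diff; a minimal usage sketch, assuming it runs from a test directory next to util.sh:

. ../util.sh
current_test=$(basename $(pwd))
info "Starting containers."
expect_running_containers "2"
pass "Everything still running."
# fail "message" would print the message and exit 1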

View File

@@ -1,45 +0,0 @@
version: '3'
services:
webdav:
image: bytemark/webdav:2.4
environment:
AUTH_TYPE: Digest
USERNAME: test
PASSWORD: test
volumes:
- webdav_backup_data:/var/lib/dav
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
depends_on:
- webdav
restart: always
environment:
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
WEBDAV_URL: http://webdav/
WEBDAV_URL_INSECURE: 'true'
WEBDAV_PATH: /my/new/path/
WEBDAV_USERNAME: test
WEBDAV_PASSWORD: test
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
webdav_backup_data:
name: webdav_backup_data
app_data:
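
Given WEBDAV_URL and WEBDAV_PATH above, the archive should end up under the data directory the webdav image serves from the mounted volume, i.e. /webdav_data/data/my/new/path/ when inspected through the named volume. An equivalent manual check (sketch, matching what the test script below asserts):

docker run --rm -v webdav_backup_data:/webdav_data alpine \
  ls -l /webdav_data/data/my/new/path/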

View File

@@ -1,40 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm \
-v webdav_backup_data:/webdav_data \
alpine \
ash -c 'tar -xvf /webdav_data/data/my/new/path/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backup."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm \
-v webdav_backup_data:/webdav_data \
alpine \
ash -c '[ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes