Compare commits

...

92 Commits

Author SHA1 Message Date
Frederik Ring
f4138fd733 Run httpd server in container if requested 2023-08-04 14:30:47 +02:00
Frederik Ring
1c6f64e254 Current Docker client breaks in newer Go versions (#241)
* Current Docker client breaks in newer Go versions

* Cater for breaking API changes in Docker client

* Update Docker client

* Unpin Go version used for build

* Tidy sum file
2023-07-25 19:46:57 +02:00
dependabot[bot]
085d2c5dfd Bump github.com/minio/minio-go/v7 from 7.0.59 to 7.0.61 (#240) 2023-07-24 19:16:02 +00:00
dependabot[bot]
b1382dee00 Bump github.com/Azure/azure-sdk-for-go/sdk/storage/azblob (#239) 2023-07-24 19:15:56 +00:00
Frederik Ring
c3732107b1 Current Docker client breaks in Go 1.20.6 (#242) 2023-07-24 21:01:28 +02:00
dependabot[bot]
d288c87c54 Bump github.com/minio/minio-go/v7 from 7.0.58 to 7.0.59 (#238) 2023-07-04 07:53:19 +00:00
dependabot[bot]
47491439a1 Bump github.com/studio-b12/gowebdav (#235) 2023-06-27 07:46:34 +00:00
dependabot[bot]
94f71ac765 Bump github.com/minio/minio-go/v7 from 7.0.57 to 7.0.58 (#236) 2023-06-27 05:25:42 +00:00
dependabot[bot]
2addf1dd6c Bump golang.org/x/sync from 0.2.0 to 0.3.0 (#234) 2023-06-20 12:29:11 +00:00
dependabot[bot]
c07990eaf6 Bump github.com/minio/minio-go/v7 from 7.0.56 to 7.0.57 (#233) 2023-06-20 12:28:06 +00:00
jsloane
a27743bd32 Update README.md (#230) 2023-06-17 08:30:48 +02:00
dependabot[bot]
9d5b897ab4 Bump golang.org/x/sync from 0.1.0 to 0.2.0 (#229) 2023-06-13 07:17:58 +00:00
dependabot[bot]
30bf31cd90 Bump github.com/Azure/azure-sdk-for-go/sdk/storage/azblob (#228) 2023-06-13 07:04:03 +00:00
dependabot[bot]
32e9a05b40 Bump github.com/minio/minio-go/v7 from 7.0.44 to 7.0.56 (#227) 2023-06-13 07:03:38 +00:00
dependabot[bot]
b302884447 Bump golang.org/x/crypto from 0.3.0 to 0.9.0 (#223) 2023-06-10 13:15:41 +00:00
dependabot[bot]
b3e1ce27be Bump github.com/Azure/azure-sdk-for-go/sdk/azidentity (#225) 2023-06-10 12:57:44 +00:00
dependabot[bot]
66518ed0ff Bump github.com/sirupsen/logrus from 1.9.0 to 1.9.3 (#226) 2023-06-10 12:57:39 +00:00
dependabot[bot]
14d966d41a Bump github.com/otiai10/copy from 1.10.0 to 1.11.0 (#224) 2023-06-10 12:57:30 +00:00
Frederik Ring
336dece328 Set up automated updates for Docker base images and Go packages 2023-06-10 14:42:28 +02:00
Frederik Ring
dc8172b673 Use alpine-1.18 as the base image (#219) 2023-06-03 13:08:35 +02:00
Erwan LE PRADO
5ea9a7ce15 feat: add better handler for part size (#214)
* feat: add better handler for part size


fix: use local file 


fix: try with another path


fix: use bytes 


chore: go back


go back readme


goback


goback


goback

* chore: better handling

* fix: typo readme

* chore: wrong comparaison

* fix: typo
2023-06-02 16:30:02 +02:00
Frederik Ring
bcffe0bc25 Clarify docs section about user executing labeled commands 2023-05-26 16:15:09 +02:00
dependabot[bot]
144e65ce6f Bump github.com/docker/distribution (#215) 2023-05-11 21:07:45 +00:00
ba-tno
07afa53cd3 Update shoutrrr to 0.7 (#213)
* Replace docker-compose reference with docker[space]compose

* Update shoutrrr only to 0.7.1

* modules after go mod tidy

* Refer to v0.7 docs of shoutrrr

* Remove 'v' from shoutrrr doc link
2023-04-29 20:14:04 +02:00
Frederik Ring
9a07f5486b Docs reference incorrect shoutrrr version 2023-04-29 14:44:39 +02:00
Frederik Ring
d4c5f65f31 Entrypoint permissions can be set on COPY (#211) 2023-04-28 20:06:57 +02:00
Frederik Ring
5b8a484d80 Documentation around user label is lacking 2023-04-28 16:01:17 +02:00
Frederik Ring
37c01a578c TaskTemplate.ForceUpdate is a counter (#209) 2023-04-26 08:45:12 +02:00
Frederik Ring
46c6441d48 Add note about GHCR to README 2023-04-07 12:00:39 +02:00
Frederik Ring
5715d9ff9b Update of package copy does not fail on deleted files (#206) 2023-04-07 11:28:36 +02:00
dependabot[bot]
6ba173d916 Bump github.com/docker/docker (#205) 2023-04-05 04:58:07 +00:00
Frederik Ring
301fe6628c on: is expected to be an object 2023-04-02 19:45:46 +02:00
Frederik Ring
5ff2d53602 Items in on: are expected to be objects 2023-04-02 19:44:51 +02:00
Frederik Ring
cddd1fdcea Prevent duplicate builds on pull request 2023-04-02 19:41:49 +02:00
Frederik Ring
808cf8f82d Local directory can be used instead of volume for storing test artifact (#204) 2023-04-02 19:41:00 +02:00
Frederik Ring
c177202ac1 Multi platform build requires explicit buildx setup 2023-04-02 11:51:35 +02:00
Frederik Ring
27c2201161 Branches filter is a glob pattern, not a regex 2023-04-02 11:46:06 +02:00
Diulgher Artiom
7f20036b15 Possibility to use -u (user) option in docker exec (#203)
* Add user option for docker exec

* Add test for user option

* Return test version for image

* remove gitea config file

* refactor tests

* remove comments & fix image name

* add docs

* cleanup

* Update README.md with suggested correction

Co-authored-by: Frederik Ring <frederik.ring@gmail.com>

* fix backup command & bind folder instead of volume

---------

Co-authored-by: tao <generaltao.md@gmail.com>
Co-authored-by: Frederik Ring <frederik.ring@gmail.com>
2023-04-02 11:12:10 +02:00
Frederik Ring
2ac1f0cea4 Also trigger test runs on Pull Request 2023-03-29 07:57:09 +02:00
Frederik Ring
66ad124ddd any can be used instead of interface{} 2023-03-16 19:48:12 +01:00
Frederik Ring
aee802cb09 Migrate CI setup to GitHub Actions, also publish to GHCR (#199)
* Run tests in GitHub actions

* Do not try to allocate a pseudo TTY when running compose commands

* Try hard disabling TTY allocation

* Use compose plugin

* Test scripts shall not try to allocate a TTY

* Pass correct base version

* Check whether env var is even needed

* Stop running tests in CircleCI

* Run releases from GitHub actions as well

* Manually construct tags to be pushed on release
2023-03-16 19:32:44 +01:00
dependabot[bot]
a06ad1957a Bump github.com/docker/distribution (#195) 2023-03-07 06:56:56 +00:00
dependabot[bot]
15786c5da3 Bump golang.org/x/net from 0.2.0 to 0.7.0 (#191) 2023-02-18 06:34:40 +00:00
dependabot[bot]
641a3203c7 Bump github.com/containerd/containerd from 1.6.6 to 1.6.18 (#190) 2023-02-16 19:32:46 +00:00
Frederik Ring
5adfe3989e Document usage with rootless Docker installations
As described in #189
2023-02-16 08:18:57 +01:00
dependabot[bot]
550833be33 Merge pull request #188 from offen/dependabot/go_modules/github.com/containrrr/shoutrrr-0.6.0 2023-02-14 19:09:43 +00:00
dependabot[bot]
201a983ea4 Bump github.com/containrrr/shoutrrr from 0.5.2 to 0.6.0
Bumps [github.com/containrrr/shoutrrr](https://github.com/containrrr/shoutrrr) from 0.5.2 to 0.6.0.
- [Release notes](https://github.com/containrrr/shoutrrr/releases)
- [Changelog](https://github.com/containrrr/shoutrrr/blob/main/goreleaser.yml)
- [Commits](https://github.com/containrrr/shoutrrr/compare/v0.5.2...v0.6.0)

---
updated-dependencies:
- dependency-name: github.com/containrrr/shoutrrr
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-02-14 18:57:32 +00:00
Frederik Ring
2d37e08743 Use go 1.20, join errors using stdlib (#182)
* Use go 1.20, join errors using stdlib

* Use go 1.20 proper
2023-02-02 21:07:25 +01:00
Frederik Ring
1e36bd3eb7 Non-streaming upload to WebDAV fails on big files (#181) 2023-01-16 08:28:29 +01:00
Frederik Ring
e93a74dd48 Instructions in issue templates are not supposed to be shown after submission 2023-01-12 18:02:46 +01:00
Frederik Ring
f799e6c2e9 Azure Blob Storage is missing from headline in README 2023-01-11 21:54:50 +01:00
Frederik Ring
5c04e11f10 Add support for Azure Blob Storage (#171)
* Scaffold Azure storage backend that does nothing yet

* Implement copy for Azure Blob Storage

* Set up automated testing for Azure Storage

* Implement pruning for Azure blob storage

* Add documentation for Azure Blob Storage

* Add support for remote path

* Add azure to notifications doc

* Tidy go.mod file

* Allow use of managed identity credential

* Use volume in tests

* Auto append trailing slash to endpoint if needed, clarify docs, tidy mod file
2023-01-11 21:40:48 +01:00
Frederik Ring
aadbaa741d Update intro in README 2023-01-08 09:39:32 +01:00
Frederik Ring
9b7af67a26 Run tests using compose v2 (#178) 2023-01-07 18:50:27 +01:00
Frederik Ring
1cb4883458 Update alpine base image to 3.17 (#177) 2023-01-05 19:46:47 +01:00
Frederik Ring
982f4fe191 Fix mistake in README 2022-12-30 16:16:46 +01:00
Frederik Ring
63961cd826 Pass file location to lifecycle commands (#173)
* Add test case for extending image and calling through to rsync

* Keep backup file location env var

* Add documentation

* Work against untared content in test
2022-12-30 16:07:34 +01:00
Frederik Ring
9534cde7d9 Allow use of a custom ca cert when working against S3 storages (#170) 2022-12-22 14:37:51 +01:00
XF
08bafdb054 Update MinIO Go SDK to v7.0.44 (#167)
Adds support for the eu-south-2, eu-central-2, me-central-1, ap-southeast-3 AWS endpoints
2022-11-21 20:30:13 +01:00
Frederik Ring
907deecdd0 Call ListObjects without WithMetadata option (#165) 2022-10-23 21:56:44 +02:00
Frederik Ring
92b888e72c Remove debugging remnant from test 2022-10-17 20:41:10 +02:00
Frederik Ring
3925ac1ee0 Special characters in password do not break GPG test case 2022-10-17 19:42:38 +02:00
Frederik Ring
5c7856feb3 Consider failed casting to error response, use established minio bootstrap in tests 2022-10-13 19:40:41 +02:00
Frederik Ring
dec7d7e2c0 Lock version of Docker Credential Helper in CI 2022-10-12 20:23:40 +02:00
pixxon
b5cc1262e2 add aws secret handling (#161)
* add aws secret handling

* make it look go-ish

* fix tests

* whitespace

* sleep a bit
2022-10-12 19:14:57 +02:00
Frederik Ring
00c83dfac7 Fix more error strings 2022-09-15 10:49:45 +02:00
Frederik Ring
eb9a198327 Ensure consistency in error messages 2022-09-15 10:04:12 +02:00
Frederik Ring
97e975a535 Add FUNDING.yml 2022-09-02 09:39:55 +02:00
Frederik Ring
749a7a15a6 Build using Go 1.19 (#153) 2022-09-01 15:12:48 +02:00
Frederik Ring
a6ec128cab Run copying and pruning against multiple storages in parallel (#152) 2022-09-01 14:38:04 +02:00
Frederik Ring
695a94d479 Add template for support request issue 2022-09-01 14:30:42 +02:00
Frederik Ring
2316111892 Fix key location in container in SSH example 2022-08-29 17:10:07 +02:00
Frederik Ring
b60c747448 Fix WebDAV spelling, remove some inconsistencies (#143)
* Simplify logging, fix WebDAV spelling

* Define options types per package

* Move util functions that are not used cross package

* Add per file license headers

* Rename config type
2022-08-18 12:37:45 +02:00
MaxJa4
279844ccfb Added abstract helper interface for all storage backends (#135)
* Added abstract helper interface and implemented it for all storage backends

* Moved storage client initializations also to helper classes

* Fixed ssh init issue

* Moved script parameter to helper struct to simplify script init.

* Created sub modules. Enhanced abstract implementation.

* Fixed config issue

* Fixed declaration issues. Added config to interface.

* Added StorageProviders to unify all backends.

* Cleanup, optimizations, comments.

* Applied discussed changes. See description.

Moved modules to internal packages.
Replaced StoragePool with slice.
Moved conditional for init of storage backends back to script.

* Fix docker build issue

* Fixed accidentally removed local copy condition.

* Delete .gitignore

* Renaming/changes according to review

Renamed Init functions and interface.
Replaced config object with specific config values.
Init func returns interface instead of struct.
Removed custom import names where possible.

* Fixed auto-complete error.

* Combined copy instructions into one layer.

* Added logging func for storages.

* Introduced logging func for errors too.

* Missed an error message

* Moved config back to main. Optimized prune stats handling.

* Move stats back to main package

* Code doc stuff

* Apply changes from #136

* Replace name field with function.

* Changed receiver names from stg to b.

* Renamed LogFuncDef to Log

* Removed redundant package name.

* Renamed storagePool to storages.

* Simplified creation of new storage backend.

* Added initialization for storage stats map.

* Invert .dockerignore patterns.

* Fix package typo
2022-08-18 12:37:45 +02:00
Frederik Ring
4ec88d14dd Update issue templates (#145) 2022-08-18 10:59:34 +02:00
Frederik Ring
599b7f3f74 Use crontab command to recreate empty tab file (#141) 2022-08-15 15:00:58 +02:00
Frederik Ring
b2d4c48082 Update base image to alpine:3.16 (#124) 2022-08-15 09:25:47 +02:00
MaxJa4
2b7f0c52c0 Print more error info for minio (#136)
* Print more error info for minio

* Unpacked error info
2022-08-15 09:25:32 +02:00
Frederik Ring
cc912d7b64 Delete existing crontab before appending entries per conf.d (#140) 2022-08-15 09:25:19 +02:00
Frederik Ring
26c8ba971f Add test case for exec label (#132) 2022-07-15 09:34:01 +02:00
Alexander Zimmermann
3f10d0f817 Update README.md (#130)
Replace deprecated exec-pre label
2022-07-14 13:47:54 +02:00
Frederik Ring
b441cf3e2b Fine grained labels (#115)
* Refactor label command mechanism to be more flexible

* Run all steps wrapped in labeled commands

* Rename methods to be in line with lifecycle

* Deprecate exec-pre and exec-post labels

* Add documentation

* Use type alias for lifecycle phases

* Fix bad imports

* Fix command lookup for deprecated labels

* Use more generic naming for lifecycle phase

* Fail on erroneous post command

* Update documentation
2022-07-10 10:36:56 +02:00
Frederik Ring
82f66565da Add further docs on backup selection when using conf.d 2022-07-08 14:05:45 +02:00
Frederik Ring
d68814be9d Add section about shoutrrr CLI tool to README 2022-07-07 22:22:21 +02:00
Erwan LE PRADO
3661a4b49b feat: Add storage class header (#119)
* feat: Add storage class header

* doc: change the readme

* chore: Remove the unnecessary default  value
2022-07-06 13:18:12 +02:00
Frederik Ring
e738bd0539 Make crond log to stderr so Docker can forward it (#120) 2022-07-06 13:16:43 +02:00
Frederik Ring
342ae5910e Add env template helper (#121) 2022-07-06 13:16:32 +02:00
Frederik Ring
c2a8cc92fc Untangle tests (#112)
* Isolate S3 test case

* Isolate webdav test case

* Isolate SSH test case

* Isolate local storage test case

* Isolate gpg test case

* Add missing volume mount

* Fix file locations for local test case

* Remove compose test case, use utils

* Use test utils throughout

* Use dedicated tmp dir

* Fix link location that is being tested

* Use dedicated tmp_dirs when working on host fs

* Force delete artifact

* Fix expected filename

* Provide helpful messages on failing tests

* Fix filename

* Use proper volume names

* Fix syntax error, use large resource class

* Use named Docker volumes when referencing them in test scripts

* Add name of test case to logging output
2022-06-23 14:40:29 +02:00
Frederik Ring
1892d56ff6 Change default value for SSH identity file (#108)
* Change default value for SSH identity file

* Force remove write protected file in tests
2022-06-17 11:28:29 +02:00
İbrahim Akyel
0b205fe6dc SSH Backup Storage Support (#107)
* SSH Client implemented

* Private key auth implemented
Code refactoring

* Refactoring

* Passphrase renamed to IdentityPassphrase
Default private key location changed to .ssh/id
2022-06-17 11:06:15 +02:00
Frederik Ring
8c8a2fa088 Update vulnerable containerd dependency (#104) 2022-06-07 09:21:40 +02:00
Frederik Ring
a850bf13fe Fix broken link in README 2022-05-12 08:18:12 +02:00
70 changed files with 3518 additions and 1380 deletions

.circleci/config.yml (deleted)

@@ -1,72 +0,0 @@
version: 2.1
jobs:
canary:
machine:
image: ubuntu-2004:202201-02
working_directory: ~/docker-volume-backup
steps:
- checkout
- run:
name: Build
command: |
docker build . -t offen/docker-volume-backup:canary
- run:
name: Install gnupg
command: |
sudo apt-get install -y gnupg
- run:
name: Run tests
working_directory: ~/docker-volume-backup/test
command: |
export GPG_TTY=$(tty)
./test.sh canary
build:
docker:
- image: cimg/base:2020.06
environment:
DOCKER_BUILDKIT: '1'
DOCKER_CLI_EXPERIMENTAL: enabled
working_directory: ~/docker-volume-backup
steps:
- checkout
- setup_remote_docker:
version: 20.10.6
- docker/install-docker-credential-helper
- docker/configure-docker-credentials-store
- run:
name: Push to Docker Hub
command: |
echo "$DOCKER_ACCESSTOKEN" | docker login --username offen --password-stdin
# This is required for building ARM: https://gitlab.alpinelinux.org/alpine/aports/-/issues/12406
docker run --rm --privileged linuxkit/binfmt:v0.8
docker context create docker-volume-backup
docker buildx create docker-volume-backup --name docker-volume-backup --use
docker buildx inspect --bootstrap
tag_args="-t offen/docker-volume-backup:$CIRCLE_TAG"
if [[ "$CIRCLE_TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
# prerelease tags like `v2.0.0-alpha.1` should not be released as `latest`
tag_args="$tag_args -t offen/docker-volume-backup:latest"
tag_args="$tag_args -t offen/docker-volume-backup:$(echo "$CIRCLE_TAG" | cut -d. -f1)"
fi
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
$tag_args . --push
workflows:
version: 2
docker_image:
jobs:
- canary:
filters:
tags:
ignore: /^v.*/
- build:
filters:
branches:
ignore: /.*/
tags:
only: /^v.*/
orbs:
docker: circleci/docker@1.0.1

.dockerignore

@@ -1 +1,7 @@
test
.github
.circleci
docs
.editorconfig
LICENSE
README.md

.github/FUNDING.yml (new file)

@@ -0,0 +1,3 @@
github: offen
patreon: offen

.github/ISSUE_TEMPLATE.md (deleted)

@@ -1,20 +0,0 @@
* **I'm submitting a ...**
- [ ] bug report
- [ ] feature request
- [ ] support request
* **What is the current behavior?**
* **If the current behavior is a bug, please provide the configuration and steps to reproduce and if possible a minimal demo of the problem.**
* **What is the expected behavior?**
* **What is the motivation / use case for changing the behavior?**
* **Please tell us about your environment:**
- Image version:
- Docker version:
- docker-compose version:
* **Other information** (e.g. detailed explanation, stacktraces, related issues, suggestions how to fix, links for us to have context, eg. stackoverflow, etc)

.github/ISSUE_TEMPLATE/bug_report.md (new file)

@@ -0,0 +1,34 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
<!--
A clear and concise description of what the bug is.
-->
**To Reproduce**
Steps to reproduce the behavior:
1. ...
2. ...
3. ...
**Expected behavior**
<!--
A clear and concise description of what you expected to happen.
-->
**Version (please complete the following information):**
- Image Version: <!-- e.g. v2.21.0 -->
- Docker Version: <!-- e.g. 20.10.17 -->
- Docker Compose Version (if applicable): <!-- e.g. 1.29.2 -->
**Additional context**
<!--
Add any other context about the problem here.
-->

.github/ISSUE_TEMPLATE/feature_request.md (new file)

@@ -0,0 +1,28 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
<!--
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-->
**Describe the solution you'd like**
<!--
A clear and concise description of what you want to happen.
-->
**Describe alternatives you've considered**
<!--
A clear and concise description of any alternative solutions or features you've considered.
-->
**Additional context**
<!--
Add any other context or screenshots about the feature request here.
-->

.github/ISSUE_TEMPLATE/support_request.md (new file)

@@ -0,0 +1,28 @@
---
name: Support request
about: Ask for help
title: ''
labels: ''
assignees: ''
---
**What are you trying to do?**
<!--
A clear and concise description of what you are trying to do, but cannot get working.
-->
**What is your current configuration?**
<!--
Add the full configuration you are using. Please redact out any real-world credentials.
-->
**Log output**
<!--
Provide the full log output of your setup.
-->
**Additional context**
<!--
Add any other context or screenshots about the support request here.
-->

.github/dependabot.yml (new file)

@@ -0,0 +1,10 @@
version: 2
updates:
- package-ecosystem: docker
directory: /
schedule:
interval: weekly
- package-ecosystem: gomod
directory: /
schedule:
interval: weekly

.github/workflows/release.yml (new file)

@@ -0,0 +1,59 @@
name: Release Docker Image
on:
push:
tags: v**
jobs:
push_to_registries:
name: Push Docker image to multiple registries
runs-on: ubuntu-latest
permissions:
packages: write
contents: read
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Log in to GHCR
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract Docker tags
id: meta
run: |
version_tag="${{github.ref_name}}"
tags=($version_tag)
if [[ "$version_tag" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
# prerelease tags like `v2.0.0-alpha.1` should not be released as `latest` nor `v2`
tags+=("latest")
tags+=($(echo "$version_tag" | cut -d. -f1))
fi
releases=""
for tag in "${tags[@]}"; do
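# ${releases:+$releases,} expands to "$releases," only when releases is already
# non-empty, so the entries end up comma-joined without a leading comma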
releases="${releases:+$releases,}offen/docker-volume-backup:$tag,ghcr.io/offen/docker-volume-backup:$tag"
done
echo "releases=$releases" >> "$GITHUB_OUTPUT"
- name: Build and push Docker images
uses: docker/build-push-action@v4
with:
context: .
push: true
platforms: linux/amd64,linux/arm64,linux/arm/v7
tags: ${{ steps.meta.outputs.releases }}

.github/workflows/test.yml (new file)

@@ -0,0 +1,30 @@
name: Run Integration Tests
on:
push:
branches:
- main
pull_request:
jobs:
test:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build Docker Image
env:
DOCKER_BUILDKIT: '1'
run: docker build . -t offen/docker-volume-backup:test
- name: Run Tests
working-directory: ./test
run: |
# Stop the buildx container so the tests can make assertions
# about the number of running containers
docker rm -f $(docker ps -aq)
export GPG_TTY=$(tty)
./test.sh test

Dockerfile

@@ -1,24 +1,21 @@
# Copyright 2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: MPL-2.0
FROM golang:1.18-alpine as builder
FROM golang:1.20-alpine as builder
WORKDIR /app
COPY go.mod go.sum ./
COPY . .
RUN go mod download
COPY cmd/backup ./cmd/backup/
WORKDIR /app/cmd/backup
RUN go build -o backup .
FROM alpine:3.15
FROM alpine:3.18
WORKDIR /root
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache ca-certificates busybox-extras
COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
COPY ./entrypoint.sh /root/
RUN chmod +x entrypoint.sh
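# --chmod (a BuildKit feature) sets the permissions during COPY, replacing the
# separate RUN chmod layer above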
COPY --chmod=755 ./entrypoint.sh /root/
ENTRYPOINT ["/root/entrypoint.sh"]

README.md

@@ -4,23 +4,24 @@
# docker-volume-backup
Backup Docker volumes locally or to any S3 compatible storage.
Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage or SSH compatible storage.
The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) sidecar container to an existing Docker setup.
It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3 or WebDAV compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV, Azure Blob Storage or SSH compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
<!-- MarkdownTOC -->
- [Quickstart](#quickstart)
- [Recurring backups in a compose setup](#recurring-backups-in-a-compose-setup)
- [One-off backups using Docker CLI](#one-off-backups-using-docker-cli)
- [Available image registries](#available-image-registries)
- [Configuration reference](#configuration-reference)
- [How to](#how-to)
- [Stop containers during backup](#stop-containers-during-backup)
- [Automatically pruning old backups](#automatically-pruning-old-backups)
- [Send email notifications on failed backup runs](#send-email-notifications-on-failed-backup-runs)
- [Customize notifications](#customize-notifications)
- [Run custom commands before / after backup](#run-custom-commands-before--after-backup)
- [Run custom commands during the backup lifecycle](#run-custom-commands-during-the-backup-lifecycle)
- [Encrypting your backup using GPG](#encrypting-your-backup-using-gpg)
- [Restoring a volume from a backup](#restoring-a-volume-from-a-backup)
- [Set the timezone the container runs in](#set-the-timezone-the-container-runs-in)
@@ -28,14 +29,21 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
- [Manually triggering a backup](#manually-triggering-a-backup)
- [Update deprecated email configuration](#update-deprecated-email-configuration)
- [Replace deprecated `BACKUP_FROM_SNAPSHOT` usage](#replace-deprecated-backup_from_snapshot-usage)
- [Replace deprecated `exec-pre` and `exec-post` labels](#replace-deprecated-exec-pre-and-exec-post-labels)
- [Using a custom Docker host](#using-a-custom-docker-host)
- [Use with rootless Docker](#use-with-rootless-docker)
- [Run multiple backup schedules in the same container](#run-multiple-backup-schedules-in-the-same-container)
- [Define different retention schedules](#define-different-retention-schedules)
- [Use special characters in notification URLs](#use-special-characters-in-notification-urls)
- [Handle file uploads using third party tools](#handle-file-uploads-using-third-party-tools)
- [Recipes](#recipes)
- [Backing up to AWS S3](#backing-up-to-aws-s3)
- [Backing up to Filebase](#backing-up-to-filebase)
- [Backing up to MinIO](#backing-up-to-minio)
- [Backing up to MinIO \(using Docker secrets\)](#backing-up-to-minio-using-docker-secrets)
- [Backing up to WebDAV](#backing-up-to-webdav)
- [Backing up to SSH](#backing-up-to-ssh)
- [Backing up to Azure Blob Storage](#backing-up-to-azure-blob-storage)
- [Backing up locally](#backing-up-locally)
- [Backing up to AWS S3 as well as locally](#backing-up-to-aws-s3-as-well-as-locally)
- [Running on a custom cron schedule](#running-on-a-custom-cron-schedule)
@@ -114,6 +122,18 @@ docker run --rm \
Alternatively, pass a `--env-file` in order to use a full config as described below.
### Available image registries
This Docker image is published to both Docker Hub and the GitHub container registry.
Depending on your preferences and needs, you can reference both `offen/docker-volume-backup` as well as `ghcr.io/offen/docker-volume-backup`:
```
docker pull offen/docker-volume-backup:v2
docker pull ghcr.io/offen/docker-volume-backup:v2
```
Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
## Configuration reference
Backup targets, schedule and retention are configured in environment variables.
@@ -193,6 +213,14 @@ You can populate below template according to your requirements and use it as you
# AWS_ACCESS_KEY_ID="<xxx>"
# AWS_SECRET_ACCESS_KEY="<xxx>"
# It is possible to provide the keys in files, which allows you to hide the sensitive data.
# These values have a higher priority than the ones above, meaning if both are set
# the values from the files will be used.
# This option is most useful with Docker [secrets](https://docs.docker.com/engine/swarm/secrets/).
# AWS_ACCESS_KEY_ID_FILE="/path/to/file"
# AWS_SECRET_ACCESS_KEY_FILE="/path/to/file"
# Instead of providing static credentials, you can also use IAM instance profiles
# or similar to provide authentication. Some possible configuration options on AWS:
# - EC2: http://169.254.169.254
@@ -220,6 +248,27 @@ You can populate below template according to your requirements and use it as you
# AWS_ENDPOINT_INSECURE="true"
# If you wish to use self signed certificates for your S3 server, you can pass
# the location of a PEM encoded CA certificate and it will be used for
# validating your certificates.
# Alternatively, pass a PEM encoded string containing the certificate.
# AWS_ENDPOINT_CA_CERT="/path/to/cert.pem"
# Setting this variable will change the S3 storage class header.
# Defaults to "STANDARD", you can set this value according to your needs.
# AWS_STORAGE_CLASS="GLACIER"
# Setting this variable will change the S3 default part size for the copy step.
# This is useful when uploading large files.
# NB: Scaleway limits uploads to 1,000 parts, while MinIO uses a hard-coded
# limit of 10,000 parts. If you hit these limits, set a higher part size.
# Defaults to "16" (the minio library default) if unset. The value is an
# integer number of megabytes.
# AWS_PART_SIZE=16
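# For example: with the default 16 MB part size, MinIO's 10,000 part limit
# caps a single upload at roughly 160 GB, and Scaleway's 1,000 part limit at
# roughly 16 GB. Raise AWS_PART_SIZE accordingly if your archives are larger.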
# You can also backup files to any WebDAV server:
# The URL of the remote WebDAV server
@@ -245,6 +294,59 @@ You can populate below template according to your requirements and use it as you
# WEBDAV_URL_INSECURE="true"
# You can also backup files to any SSH server:
# The host name of the remote SSH server
# SSH_HOST_NAME="server.local"
# The port of the remote SSH server.
# Optional, defaults to `22`.
# SSH_PORT=2222
# The directory to place the backups in on the SSH server.
# SSH_REMOTE_PATH="/my/directory/"
# The username for the SSH server
# SSH_USER="user"
# The password for the SSH server
# SSH_PASSWORD="password"
# The path to the private key inside the container.
# Default value: /root/.ssh/id_rsa
# If a file is mounted to /root/.ssh/id_rsa, it will be used. Non-RSA keys will
# also work.
# SSH_IDENTITY_FILE="/root/.ssh/id_rsa"
# The passphrase for the identity file
# SSH_IDENTITY_PASSPHRASE="pass"
# The credential's account name when using Azure Blob Storage. This has to be
# set when using Azure Blob Storage.
# AZURE_STORAGE_ACCOUNT_NAME="account-name"
# The credential's primary account key when using Azure Blob Storage. If this
# is not given, the command tries to fall back to using a managed identity.
# AZURE_STORAGE_PRIMARY_ACCOUNT_KEY="<xxx>"
# The container name when using Azure Blob Storage.
# AZURE_STORAGE_CONTAINER_NAME="container-name"
# The service endpoint when using Azure Blob Storage. This is a template that
# can be passed the account name as shown in the default value below.
# AZURE_STORAGE_ENDPOINT="https://{{ .AccountName }}.blob.core.windows.net/"
# In addition to storing backups remotely, you can also keep local copies.
# Pass a container-local path to store your backups if needed. You also need to
# mount a local folder or Docker volume into that location (`/archive`
@@ -310,7 +412,7 @@ You can populate below template according to your requirements and use it as you
# It is possible to define commands to be run in any container before and after
# a backup is conducted. The commands themselves are defined in labels like
# `docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump [options] > dump.sql'.
# `docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump [options] > dump.sql'.
# Several options exist for controlling this feature:
# By default, any output of such a command is suppressed. If this value
@@ -331,7 +433,7 @@ You can populate below template according to your requirements and use it as you
# Notifications (email, Slack, etc.) can be sent out when a backup run finishes.
# Configuration is provided as a comma-separated list of URLs as consumed
# by `shoutrrr`: https://containrrr.dev/shoutrrr/v0.5/services/overview/
# by `shoutrrr`: https://containrrr.dev/shoutrrr/0.7/services/overview/
# The content of such notifications can be customized. Dedicated documentation
# on how to do this can be found in the README. When providing multiple URLs or
# an URL that contains a comma, the values can be URL encoded to avoid ambiguities.
@@ -395,7 +497,7 @@ You can populate below template according to your requirements and use it as you
# EMAIL_SMTP_PORT="<port>"
```
In case you encounter double quoted values in your configuration, you might be running an [older version of `docker-compose`].
In case you encounter double quoted values in your configuration, you might be running an [older version of `docker-compose`][compose-issue].
You can work around this by either updating `docker-compose` or unquoting your configuration values.
[compose-issue]: https://github.com/docker/compose/issues/2854
@@ -475,7 +577,7 @@ services:
Notification backends other than email are also supported.
Refer to the documentation of [shoutrrr][shoutrrr-docs] to find out about options and configuration.
[shoutrrr-docs]: https://containrrr.dev/shoutrrr/v0.5/services/overview/
[shoutrrr-docs]: https://containrrr.dev/shoutrrr/0.7/services/overview/
### Customize notifications
@@ -502,11 +604,16 @@ Overridable template names are: `title_success`, `body_success`, `title_failure`
For a full list of available variables and functions, see [this page](https://github.com/offen/docker-volume-backup/blob/master/docs/NOTIFICATION-TEMPLATES.md).
### Run custom commands before / after backup
### Run custom commands during the backup lifecycle
In certain scenarios it can be required to run specific commands before and after a backup is taken (e.g. dumping a database).
When mounting the Docker socket into the `docker-volume-backup` container, you can define pre- and post-commands that will be run in the context of the target container.
Such commands are defined by specifying the command in a `docker-volume-backup.exec-[pre|post]` label.
When mounting the Docker socket into the `docker-volume-backup` container, you can define pre- and post-commands that will be run in the context of the target container (it is also possible to run commands inside the `docker-volume-backup` container itself using this feature).
Such commands are defined by specifying the command in a `docker-volume-backup.[step]-[pre|post]` label, where `step` can be any of the following phases of a backup lifecycle:
- `archive` (the tar archive is created)
- `process` (the tar archive is processed, e.g. encrypted - optional)
- `copy` (the tar archive is copied to all configured storages)
- `prune` (existing backups are pruned based on the defined ruleset - optional)
Taking a database dump using `mysqldump` would look like this:
@@ -520,7 +627,7 @@ services:
volumes:
- backup_data:/tmp/backups
labels:
- docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump --all-databases > /backups/dump.sql'
- docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump --all-databases > /backups/dump.sql'
volumes:
backup_data:
@@ -540,7 +647,7 @@ services:
volumes:
- backup_data:/tmp/backups
labels:
- docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump --all-databases > /tmp/volume/dump.sql'
- docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump --all-databases > /tmp/volume/dump.sql'
- docker-volume-backup.exec-label=database
backup:
@@ -556,9 +663,27 @@ volumes:
```
The backup procedure is guaranteed to wait for all `pre` commands to finish.
The backup procedure is guaranteed to wait for all `pre` or `post` commands to finish before proceeding.
However, there are no guarantees about the order in which they are run, and they may also run concurrently.
By default the backup command is executed by the user provided by the container's image.
It is possible to specify a custom user that is used to run commands in dedicated labels with the format `docker-volume-backup.[step]-[pre|post].user`:
```yml
version: '3'
services:
gitea:
image: gitea/gitea
volumes:
- backup_data:/tmp
labels:
- docker-volume-backup.archive-pre.user=git
- docker-volume-backup.archive-pre=/bin/bash -c 'cd /tmp; /usr/local/bin/gitea dump -c /data/gitea/conf/app.ini -R -f dump.zip'
```
Make sure the user exists and is present in `passwd` inside the target container.
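A quick way to verify this could look like the following (a sketch, assuming the `gitea` service and `git` user from the example above):

```
docker compose exec gitea id git
```

`id` exits with a non-zero status if the user is unknown inside the container.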
### Encrypting your backup using GPG
The image supports encrypting backups using GPG out of the box.
@@ -682,7 +807,7 @@ NOTIFICATION_URLS=smtp://me:secret@posteo.de:587/?fromAddress=no-reply@example.c
### Replace deprecated `BACKUP_FROM_SNAPSHOT` usage
Starting with version 2.15.0, the `BACKUP_FROM_SNAPSHOT` feature has been deprecated.
If you need to prepare your sources before the backup is taken, use `exec-pre`, `exec-post` and an intermediate volume:
If you need to prepare your sources before the backup is taken, use `archive-pre`, `archive-post` and an intermediate volume:
```yml
version: '3'
@@ -694,11 +819,11 @@ services:
- data:/var/my_app
- backup:/tmp/backup
labels:
- docker-volume-backup.exec-pre=cp -r /var/my_app /tmp/backup/my-app
- docker-volume-backup.exec-post=rm -rf /tmp/backup/my-app
- docker-volume-backup.archive-pre=cp -r /var/my_app /tmp/backup/my-app
- docker-volume-backup.archive-post=rm -rf /tmp/backup/my-app
backup:
image: offen/docker-volume-backup:latest
image: offen/docker-volume-backup:v2
environment:
BACKUP_SOURCES: /tmp/backup
volumes:
@@ -710,6 +835,23 @@ volumes:
backup:
```
### Replace deprecated `exec-pre` and `exec-post` labels
Version 2.19.0 introduced the option to run labeled commands at multiple points in time during the backup lifecycle.
In order to be able to use more obvious terminology in the new labels, the existing `exec-pre` and `exec-post` labels have been deprecated.
If you want to emulate the existing behavior, all you need to do is change `exec-pre` to `archive-pre` and `exec-post` to `archive-post`:
```diff
labels:
- - docker-volume-backup.exec-pre=cp -r /var/my_app /tmp/backup/my-app
+ - docker-volume-backup.archive-pre=cp -r /var/my_app /tmp/backup/my-app
- - docker-volume-backup.exec-post=rm -rf /tmp/backup/my-app
+ - docker-volume-backup.archive-post=rm -rf /tmp/backup/my-app
```
The `EXEC_LABEL` setting and the `docker-volume-backup.exec-label` label stay as is.
Check the additional documentation on running commands during the backup lifecycle to find out about further possibilities.
### Using a custom Docker host
If you are interfacing with Docker via TCP, set `DOCKER_HOST` to the correct URL.
@@ -719,6 +861,23 @@ DOCKER_HOST=tcp://docker_socket_proxy:2375
In case you are using a socket proxy, it must support `GET` and `POST` requests to the `/containers` endpoint. If you are using Docker Swarm, it must also support the `/services` endpoint. If you are using pre/post backup commands, it must also support the `/exec` endpoint.
### Use with rootless Docker
It's also possible to use this image with a [rootless Docker installation][rootless-docker].
Instead of mounting `/var/run/docker.sock`, mount the user-specific socket into the container:
```yml
services:
backup:
image: offen/docker-volume-backup:v2
# ... configuration omitted
volumes:
- backup:/backup:ro
- /run/user/1000/docker.sock:/var/run/docker.sock:ro
```
[rootless-docker]: https://docs.docker.com/engine/security/rootless/
### Run multiple backup schedules in the same container
Multiple backup schedules with different configuration can be configured by mounting an arbitrary number of configuration files (using the `.env` format) into `/etc/dockervolumebackup/conf.d`:
@@ -746,11 +905,30 @@ The exact order of schedules that use the same cron expression is not specified.
In case you need your schedules to overlap, you need to create a dedicated container for each schedule instead.
When changing the configuration, you currently need to manually restart the container for the changes to take effect.
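For example, assuming the backup service is named `backup` as in the recipes below, this could look like:

```
docker compose restart backup
```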
Set `BACKUP_SOURCES` for each config file to control which subset of volume mounts gets backed up:
```yml
# With a volume configuration like this:
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- ./configuration:/etc/dockervolumebackup/conf.d
- app1_data:/backup/app1_data:ro
- app2_data:/backup/app2_data:ro
```
```ini
# In the 1st config file:
BACKUP_SOURCES=/backup/app1_data
# In the 2nd config file:
BACKUP_SOURCES=/backup/app2_data
```
### Define different retention schedules
If you want to manage backup retention on different schedules, the most straightforward approach is to define a dedicated configuration for each retention rule, using a different prefix in the `BACKUP_FILENAME` parameter, and then run them on different cron schedules.
For example, if you wanted to keep daily backups for 7 days, weekly backups for a month, and retain monthly backups forever, you could create three configuration files and mount them into `/etc/dockervolumebackup.d`:
For example, if you wanted to keep daily backups for 7 days, weekly backups for a month, and retain monthly backups forever, you could create three configuration files and mount them into `/etc/dockervolumebackup/conf.d`:
```ini
# 01daily.conf
@@ -779,6 +957,60 @@ BACKUP_CRON_EXPRESSION="0 4 1 * *"
Note that while it's possible to define colliding cron schedules for each of these configurations, you might need to adjust the value for `LOCK_TIMEOUT` in case your backups are large and might take longer than an hour.
### Use special characters in notification URLs
The value given to `NOTIFICATION_URLS` is a comma separated list of URLs.
If such a URL contains special characters (e.g. commas) it needs to be URL encoded.
To get an encoded version of your URL, you can use the CLI tool provided by `shoutrrr` (which is the library used for sending notifications):
```
docker run --rm -ti containrrr/shoutrrr generate [service]
```
where service is any of the [supported services][shoutrrr-docs], e.g. for SMTP:
```
docker run --rm -ti containrrr/shoutrrr generate smtp
```
### Handle file uploads using third party tools
If you want to use a non-supported storage backend, or want to use a third party (e.g. rsync, rclone) tool for file uploads, you can build a Docker image containing the required binaries off this one, and call through to these in lifecycle hooks.
For example, if you wanted to use `rsync`, define your Docker image like this:
```Dockerfile
FROM offen/docker-volume-backup:v2
RUN apk add rsync
```
Using this image, you can now omit configuring any of the supported storage backends, and instead define your own mechanism in a `docker-volume-backup.copy-post` label:
```yml
version: '3'
services:
backup:
image: your-custom-image
restart: always
environment:
BACKUP_FILENAME: "daily-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
BACKUP_CRON_EXPRESSION: "0 2 * * *"
labels:
- docker-volume-backup.copy-post=/bin/sh -c 'rsync $$COMMAND_RUNTIME_ARCHIVE_FILEPATH /destination'
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
# other services defined here ...
volumes:
app_data:
```
Commands will be invoked with the filepath of the tar archive passed as `COMMAND_RUNTIME_ARCHIVE_FILEPATH`.
## Recipes
This section lists configuration for some real-world use cases that you can mix and match according to your needs.
@@ -848,6 +1080,38 @@ volumes:
data:
```
### Backing up to MinIO (using Docker secrets)
```yml
version: '3'
services:
# ... define other services using the `data` volume here
backup:
image: offen/docker-volume-backup:v2
environment:
AWS_ENDPOINT: minio.example.com
AWS_S3_BUCKET_NAME: backup-bucket
AWS_ACCESS_KEY_ID_FILE: /run/secrets/minio_access_key
AWS_SECRET_ACCESS_KEY_FILE: /run/secrets/minio_secret_key
volumes:
- data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
secrets:
- minio_access_key
- minio_secret_key
volumes:
data:
secrets:
minio_access_key:
# ... define how secret is accessed
minio_secret_key:
# ... define how secret is accessed
```
### Backing up to WebDAV
```yml
@@ -870,6 +1134,50 @@ volumes:
data:
```
### Backing up to SSH
```yml
version: '3'
services:
# ... define other services using the `data` volume here
backup:
image: offen/docker-volume-backup:v2
environment:
SSH_HOST_NAME: server.local
SSH_PORT: 2222
SSH_USER: user
SSH_REMOTE_PATH: /data
volumes:
- data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- /path/to/private_key:/root/.ssh/id_rsa
volumes:
data:
```
### Backing up to Azure Blob Storage
```yml
version: '3'
services:
# ... define other services using the `data` volume here
backup:
image: offen/docker-volume-backup:v2
environment:
AZURE_STORAGE_CONTAINER_NAME: backup-container
AZURE_STORAGE_ACCOUNT_NAME: account-name
AZURE_STORAGE_PRIMARY_ACCOUNT_KEY: Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
volumes:
- data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
volumes:
data:
```
### Backing up locally
```yml
@@ -991,9 +1299,9 @@ services:
database:
image: mariadb:latest
labels:
- docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump -psecret --all-databases > /tmp/dumps/dump.sql'
- docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump -psecret --all-databases > /tmp/dumps/dump.sql'
volumes:
- app_data:/tmp/dumps
- data:/tmp/dumps
backup:
image: offen/docker-volume-backup:v2
environment:

cmd/backup/archive.go

@@ -63,7 +63,7 @@ func compress(paths []string, outFilePath, subPath string) error {
for _, p := range paths {
if err := writeTarGz(p, tarWriter, prefix); err != nil {
return fmt.Errorf("compress error writing %s to archive: %w", p, err)
return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
}
}

cmd/backup/config.go

@@ -4,7 +4,11 @@
package main
import (
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"os"
"regexp"
"time"
)
@@ -12,6 +16,19 @@ import (
// Config holds all configuration values that are expected to be set
// by users.
type Config struct {
AwsS3BucketName string `split_words:"true"`
AwsS3Path string `split_words:"true"`
AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"`
AwsEndpointProto string `split_words:"true" default:"https"`
AwsEndpointInsecure bool `split_words:"true"`
AwsEndpointCACert CertDecoder `envconfig:"AWS_ENDPOINT_CA_CERT"`
AwsStorageClass string `split_words:"true"`
AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
AwsAccessKeyIDFile string `envconfig:"AWS_ACCESS_KEY_ID_FILE"`
AwsSecretAccessKey string `split_words:"true"`
AwsSecretAccessKeyFile string `split_words:"true"`
AwsIamRoleEndpoint string `split_words:"true"`
AwsPartSize int64 `split_words:"true"`
BackupSources string `split_words:"true" default:"/backup"`
BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
BackupFilenameExpand bool `split_words:"true"`
@@ -23,14 +40,6 @@ type Config struct {
BackupStopContainerLabel string `split_words:"true" default:"true"`
BackupFromSnapshot bool `split_words:"true"`
BackupExcludeRegexp RegexpDecoder `split_words:"true"`
AwsS3BucketName string `split_words:"true"`
AwsS3Path string `split_words:"true"`
AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"`
AwsEndpointProto string `split_words:"true" default:"https"`
AwsEndpointInsecure bool `split_words:"true"`
AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
AwsSecretAccessKey string `split_words:"true"`
AwsIamRoleEndpoint string `split_words:"true"`
GpgPassphrase string `split_words:"true"`
NotificationURLs []string `envconfig:"NOTIFICATION_URLS"`
NotificationLevel string `split_words:"true" default:"error"`
@@ -45,9 +54,53 @@ type Config struct {
WebdavPath string `split_words:"true" default:"/"`
WebdavUsername string `split_words:"true"`
WebdavPassword string `split_words:"true"`
SSHHostName string `split_words:"true"`
SSHPort string `split_words:"true" default:"22"`
SSHUser string `split_words:"true"`
SSHPassword string `split_words:"true"`
SSHIdentityFile string `split_words:"true" default:"/root/.ssh/id_rsa"`
SSHIdentityPassphrase string `split_words:"true"`
SSHRemotePath string `split_words:"true"`
ExecLabel string `split_words:"true"`
ExecForwardOutput bool `split_words:"true"`
LockTimeout time.Duration `split_words:"true" default:"60m"`
AzureStorageAccountName string `split_words:"true"`
AzureStoragePrimaryAccountKey string `split_words:"true"`
AzureStorageContainerName string `split_words:"true"`
AzureStoragePath string `split_words:"true"`
AzureStorageEndpoint string `split_words:"true" default:"https://{{ .AccountName }}.blob.core.windows.net/"`
}
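// resolveSecret prefers the file-based variant: when secretPath is non-empty,
// the file's contents are returned, otherwise envVar is passed through as-is.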
func (c *Config) resolveSecret(envVar string, secretPath string) (string, error) {
if secretPath == "" {
return envVar, nil
}
data, err := os.ReadFile(secretPath)
if err != nil {
return "", fmt.Errorf("resolveSecret: error reading secret path: %w", err)
}
return string(data), nil
}
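// CertDecoder accepts either the location of a PEM encoded CA certificate
// file or the PEM encoded certificate itself as its value.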
type CertDecoder struct {
Cert *x509.Certificate
}
func (c *CertDecoder) Decode(v string) error {
if v == "" {
return nil
}
content, err := ioutil.ReadFile(v)
if err != nil {
content = []byte(v)
}
block, _ := pem.Decode(content)
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return fmt.Errorf("config: error parsing certificate: %w", err)
}
*c = CertDecoder{Cert: cert}
return nil
}
type RegexpDecoder struct {

cmd/backup/exec.go

@@ -21,12 +21,17 @@ import (
"golang.org/x/sync/errgroup"
)
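// exec runs the given command inside the referenced container, exposing the
// current archive location as COMMAND_RUNTIME_ARCHIVE_FILEPATH and, when
// non-empty, running as the given user.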
func (s *script) exec(containerRef string, command string) ([]byte, []byte, error) {
func (s *script) exec(containerRef string, command string, user string) ([]byte, []byte, error) {
args, _ := argv.Argv(command, nil, nil)
commandEnv := []string{
fmt.Sprintf("COMMAND_RUNTIME_ARCHIVE_FILEPATH=%s", s.file),
}
execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, types.ExecConfig{
Cmd: args[0],
AttachStdin: true,
AttachStderr: true,
Env: commandEnv,
User: user,
})
if err != nil {
return nil, nil, fmt.Errorf("exec: error creating container exec: %w", err)
@@ -86,25 +91,77 @@ func (s *script) runLabeledCommands(label string) error {
})
}
containersWithCommand, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
Filters: filters.NewArgs(f...),
})
if err != nil {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
var hasDeprecatedContainers bool
if label == "docker-volume-backup.archive-pre" {
f[0] = filters.KeyValuePair{
Key: "label",
Value: "docker-volume-backup.exec-pre",
}
deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Filters: filters.NewArgs(f...),
})
if err != nil {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
if len(deprecatedContainers) != 0 {
hasDeprecatedContainers = true
containersWithCommand = append(containersWithCommand, deprecatedContainers...)
}
}
if label == "docker-volume-backup.archive-post" {
f[0] = filters.KeyValuePair{
Key: "label",
Value: "docker-volume-backup.exec-post",
}
deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Filters: filters.NewArgs(f...),
})
if err != nil {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
if len(deprecatedContainers) != 0 {
hasDeprecatedContainers = true
containersWithCommand = append(containersWithCommand, deprecatedContainers...)
}
}
if len(containersWithCommand) == 0 {
return nil
}
if hasDeprecatedContainers {
s.logger.Warn(
"Using `docker-volume-backup.exec-pre` and `docker-volume-backup.exec-post` labels has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use other `-pre` and `-post` labels instead. Refer to the README for an upgrade guide.",
)
}
g := new(errgroup.Group)
for _, container := range containersWithCommand {
c := container
g.Go(func() error {
cmd, _ := c.Labels[label]
cmd, ok := c.Labels[label]
if !ok && label == "docker-volume-backup.archive-pre" {
cmd, _ = c.Labels["docker-volume-backup.exec-pre"]
} else if !ok && label == "docker-volume-backup.archive-post" {
cmd, _ = c.Labels["docker-volume-backup.exec-post"]
}
userLabelName := fmt.Sprintf("%s.user", label)
user := c.Labels[userLabelName]
s.logger.Infof("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/"))
stdout, stderr, err := s.exec(c.ID, cmd)
stdout, stderr, err := s.exec(c.ID, cmd, user)
if s.c.ExecForwardOutput {
os.Stderr.Write(stderr)
os.Stdout.Write(stdout)
@@ -121,3 +178,27 @@ func (s *script) runLabeledCommands(label string) error {
}
return nil
}
type lifecyclePhase string
const (
lifecyclePhaseArchive lifecyclePhase = "archive"
lifecyclePhaseProcess lifecyclePhase = "process"
lifecyclePhaseCopy lifecyclePhase = "copy"
lifecyclePhasePrune lifecyclePhase = "prune"
)
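// withLabeledCommands wraps the given callback so that matching `-pre` labeled
// commands run before it and `-post` commands run after it, even when the
// callback returns an error.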
func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func() error {
if s.cli == nil {
return cb
}
return func() error {
if err := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
return fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
}
defer func() {
s.must(s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)))
}()
return cb()
}
}

cmd/backup/hooks.go

@@ -4,6 +4,7 @@
package main
import (
"errors"
"fmt"
"sort"
)
@@ -50,7 +51,7 @@ func (s *script) runHooks(err error) error {
}
}
if len(actionErrors) != 0 {
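// errors.Join (stdlib since Go 1.20) replaces the previous custom join helper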
return join(actionErrors...)
return errors.Join(actionErrors...)
}
return nil
}

cmd/backup/lock.go

@@ -31,7 +31,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
for {
acquired, err := fileLock.TryLock()
if err != nil {
return noop, fmt.Errorf("lock: error trying lock: %w", err)
return noop, fmt.Errorf("lock: error trying to lock: %w", err)
}
if acquired {
if s.encounteredLock {

cmd/backup/main.go

@@ -38,14 +38,7 @@ func main() {
s.logger.Info("Finished running backup tasks.")
}()
s.must(func() error {
runPostCommands, err := s.runCommands()
defer func() {
s.must(runPostCommands())
}()
if err != nil {
return err
}
s.must(s.withLabeledCommands(lifecyclePhaseArchive, func() error {
restartContainers, err := s.stopContainers()
// The mechanism for restarting containers is not using hooks as it
// should happen as soon as possible (i.e. before uploading backups or
@@ -56,10 +49,10 @@ func main() {
if err != nil {
return err
}
return s.takeBackup()
}())
return s.createArchive()
})())
s.must(s.encryptBackup())
s.must(s.copyBackup())
s.must(s.pruneBackups())
s.must(s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)())
s.must(s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)())
s.must(s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)())
}

cmd/backup/notifications.go

@@ -6,7 +6,9 @@ package main
import (
"bytes"
_ "embed"
"errors"
"fmt"
"os"
"text/template"
"time"
@@ -34,16 +36,16 @@ func (s *script) notify(titleTemplate string, bodyTemplate string, err error) er
titleBuf := &bytes.Buffer{}
if err := s.template.ExecuteTemplate(titleBuf, titleTemplate, params); err != nil {
return fmt.Errorf("notifyFailure: error executing %s template: %w", titleTemplate, err)
return fmt.Errorf("notify: error executing %s template: %w", titleTemplate, err)
}
bodyBuf := &bytes.Buffer{}
if err := s.template.ExecuteTemplate(bodyBuf, bodyTemplate, params); err != nil {
return fmt.Errorf("notifyFailure: error executing %s template: %w", bodyTemplate, err)
return fmt.Errorf("notify: error executing %s template: %w", bodyTemplate, err)
}
if err := s.sendNotification(titleBuf.String(), bodyBuf.String()); err != nil {
return fmt.Errorf("notifyFailure: error notifying: %w", err)
return fmt.Errorf("notify: error notifying: %w", err)
}
return nil
}
@@ -67,7 +69,7 @@ func (s *script) sendNotification(title, body string) error {
}
}
if len(errs) != 0 {
return fmt.Errorf("sendNotification: error sending message: %w", join(errs...))
return fmt.Errorf("sendNotification: error sending message: %w", errors.Join(errs...))
}
return nil
}
@@ -82,6 +84,7 @@ var templateHelpers = template.FuncMap{
"formatBytesBin": func(bytes uint64) string {
return formatBytes(bytes, false)
},
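// "env" lets notification templates read environment variables,
// e.g. {{ env "HOSTNAME" }}.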
"env": os.Getenv,
}
// formatBytes converts an amount of bytes in a human-readable representation

cmd/backup/script.go

@@ -9,36 +9,39 @@ import (
"fmt"
"io"
"io/fs"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/offen/docker-volume-backup/internal/storage/azure"
"github.com/offen/docker-volume-backup/internal/storage/local"
"github.com/offen/docker-volume-backup/internal/storage/s3"
"github.com/offen/docker-volume-backup/internal/storage/ssh"
"github.com/offen/docker-volume-backup/internal/storage/webdav"
"github.com/containrrr/shoutrrr"
"github.com/containrrr/shoutrrr/pkg/router"
"github.com/docker/docker/api/types"
ctr "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/kelseyhightower/envconfig"
"github.com/leekchan/timeutil"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/otiai10/copy"
"github.com/sirupsen/logrus"
"github.com/studio-b12/gowebdav"
"golang.org/x/crypto/openpgp"
"golang.org/x/sync/errgroup"
)
// script holds all the stateful information required to orchestrate a
// single backup run.
type script struct {
cli *client.Client
minioClient *minio.Client
webdavClient *gowebdav.Client
storages []storage.Backend
logger *logrus.Logger
sender *router.ServiceRouter
template *template.Template
@@ -70,7 +73,13 @@ func newScript() (*script, error) {
stats: &Stats{
StartTime: time.Now(),
LogOutput: logBuffer,
Storages: StoragesStats{},
Storages: map[string]StorageStats{
"S3": {},
"WebDAV": {},
"SSH": {},
"Local": {},
"Azure": {},
},
},
}
@@ -102,61 +111,101 @@ func newScript() (*script, error) {
s.cli = cli
}
logFunc := func(logType storage.LogLevel, context string, msg string, params ...any) {
switch logType {
case storage.LogLevelWarning:
s.logger.Warnf("["+context+"] "+msg, params...)
case storage.LogLevelError:
s.logger.Errorf("["+context+"] "+msg, params...)
case storage.LogLevelInfo:
default:
s.logger.Infof("["+context+"] "+msg, params...)
}
}
if s.c.AwsS3BucketName != "" {
var creds *credentials.Credentials
if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" {
creds = credentials.NewStaticV4(
s.c.AwsAccessKeyID,
s.c.AwsSecretAccessKey,
"",
)
} else if s.c.AwsIamRoleEndpoint != "" {
creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint)
accessKeyID, err := s.c.resolveSecret(s.c.AwsAccessKeyID, s.c.AwsAccessKeyIDFile)
if err != nil {
return nil, fmt.Errorf("newScript: error resolving AwsAccessKeyID: %w", err)
}
secretAccessKey, err := s.c.resolveSecret(s.c.AwsSecretAccessKey, s.c.AwsSecretAccessKeyFile)
if err != nil {
return nil, fmt.Errorf("newScript: error resolving AwsSecretAccessKey: %w", err)
}
s3Config := s3.Config{
Endpoint: s.c.AwsEndpoint,
AccessKeyID: accessKeyID,
SecretAccessKey: secretAccessKey,
IamRoleEndpoint: s.c.AwsIamRoleEndpoint,
EndpointProto: s.c.AwsEndpointProto,
EndpointInsecure: s.c.AwsEndpointInsecure,
RemotePath: s.c.AwsS3Path,
BucketName: s.c.AwsS3BucketName,
StorageClass: s.c.AwsStorageClass,
CACert: s.c.AwsEndpointCACert.Cert,
PartSize: s.c.AwsPartSize,
}
if s3Backend, err := s3.NewStorageBackend(s3Config, logFunc); err != nil {
return nil, err
} else {
return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
s.storages = append(s.storages, s3Backend)
}
options := minio.Options{
Creds: creds,
Secure: s.c.AwsEndpointProto == "https",
}
if s.c.AwsEndpointInsecure {
if !options.Secure {
return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
}
transport, err := minio.DefaultTransport(true)
if err != nil {
return nil, fmt.Errorf("newScript: failed to create default minio transport")
}
transport.TLSClientConfig.InsecureSkipVerify = true
options.Transport = transport
}
mc, err := minio.New(s.c.AwsEndpoint, &options)
if err != nil {
return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
}
s.minioClient = mc
}
if s.c.WebdavUrl != "" {
if s.c.WebdavUsername == "" || s.c.WebdavPassword == "" {
return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
webDavConfig := webdav.Config{
URL: s.c.WebdavUrl,
URLInsecure: s.c.WebdavUrlInsecure,
Username: s.c.WebdavUsername,
Password: s.c.WebdavPassword,
RemotePath: s.c.WebdavPath,
}
if webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc); err != nil {
return nil, err
} else {
webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword)
s.webdavClient = webdavClient
if s.c.WebdavUrlInsecure {
defaultTransport, ok := http.DefaultTransport.(*http.Transport)
if !ok {
return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport")
}
webdavTransport := defaultTransport.Clone()
webdavTransport.TLSClientConfig.InsecureSkipVerify = s.c.WebdavUrlInsecure
s.webdavClient.SetTransport(webdavTransport)
s.storages = append(s.storages, webdavBackend)
}
}
if s.c.SSHHostName != "" {
sshConfig := ssh.Config{
HostName: s.c.SSHHostName,
Port: s.c.SSHPort,
User: s.c.SSHUser,
Password: s.c.SSHPassword,
IdentityFile: s.c.SSHIdentityFile,
IdentityPassphrase: s.c.SSHIdentityPassphrase,
RemotePath: s.c.SSHRemotePath,
}
if sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, sshBackend)
}
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
localConfig := local.Config{
ArchivePath: s.c.BackupArchive,
LatestSymlink: s.c.BackupLatestSymlink,
}
localBackend := local.NewStorageBackend(localConfig, logFunc)
s.storages = append(s.storages, localBackend)
}
if s.c.AzureStorageAccountName != "" {
azureConfig := azure.Config{
ContainerName: s.c.AzureStorageContainerName,
AccountName: s.c.AzureStorageAccountName,
PrimaryAccountKey: s.c.AzureStoragePrimaryAccountKey,
Endpoint: s.c.AzureStorageEndpoint,
RemotePath: s.c.AzureStoragePath,
}
azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
if err != nil {
return nil, err
}
s.storages = append(s.storages, azureBackend)
}
if s.c.EmailNotificationRecipient != "" {
@@ -225,22 +274,6 @@ func newScript() (*script, error) {
return s, nil
}
func (s *script) runCommands() (func() error, error) {
if s.cli == nil {
return noop, nil
}
if err := s.runLabeledCommands("docker-volume-backup.exec-pre"); err != nil {
return noop, fmt.Errorf("runCommands: error running pre commands: %w", err)
}
return func() error {
if err := s.runLabeledCommands("docker-volume-backup.exec-post"); err != nil {
return fmt.Errorf("runCommands: error running post commands: %w", err)
}
return nil
}, nil
}
// stopContainers stops all Docker containers that are marked to be stopped
// during the backup and returns a function that can be called to
// restart everything that has been stopped.
@@ -249,11 +282,9 @@ func (s *script) stopContainers() (func() error, error) {
return noop, nil
}
allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
})
allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
return noop, fmt.Errorf("stopContainers: error querying for containers: %w", err)
}
containerLabel := fmt.Sprintf(
@@ -261,7 +292,6 @@ func (s *script) stopContainers() (func() error, error) {
s.c.BackupStopContainerLabel,
)
containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
Filters: filters.NewArgs(filters.KeyValuePair{
Key: "label",
Value: containerLabel,
@@ -269,7 +299,7 @@ func (s *script) stopContainers() (func() error, error) {
})
if err != nil {
return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
return noop, fmt.Errorf("stopContainers: error querying for containers to stop: %w", err)
}
if len(containersToStop) == 0 {
@@ -286,7 +316,7 @@ func (s *script) stopContainers() (func() error, error) {
var stoppedContainers []types.Container
var stopErrors []error
for _, container := range containersToStop {
if err := s.cli.ContainerStop(context.Background(), container.ID, nil); err != nil {
if err := s.cli.ContainerStop(context.Background(), container.ID, ctr.StopOptions{}); err != nil {
stopErrors = append(stopErrors, err)
} else {
stoppedContainers = append(stoppedContainers, container)
@@ -296,9 +326,9 @@ func (s *script) stopContainers() (func() error, error) {
var stopError error
if len(stopErrors) != 0 {
stopError = fmt.Errorf(
"stopContainersAndRun: %d error(s) stopping containers: %w",
"stopContainers: %d error(s) stopping containers: %w",
len(stopErrors),
join(stopErrors...),
errors.Join(stopErrors...),
)
}
@@ -333,9 +363,9 @@ func (s *script) stopContainers() (func() error, error) {
}
}
if serviceMatch.ID == "" {
return fmt.Errorf("stopContainersAndRun: couldn't find service with name %s", serviceName)
return fmt.Errorf("stopContainers: couldn't find service with name %s", serviceName)
}
serviceMatch.Spec.TaskTemplate.ForceUpdate = 1
serviceMatch.Spec.TaskTemplate.ForceUpdate += 1
if _, err := s.cli.ServiceUpdate(
context.Background(), serviceMatch.ID,
serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
@@ -347,9 +377,9 @@ func (s *script) stopContainers() (func() error, error) {
if len(restartErrors) != 0 {
return fmt.Errorf(
"stopContainersAndRun: %d error(s) restarting containers and services: %w",
"stopContainers: %d error(s) restarting containers and services: %w",
len(restartErrors),
join(restartErrors...),
errors.Join(restartErrors...),
)
}
s.logger.Infof(
@@ -360,9 +390,9 @@ func (s *script) stopContainers() (func() error, error) {
}, stopError
}
// takeBackup creates a tar archive of the configured backup location and
// createArchive creates a tar archive of the configured backup location and
// saves it to disk.
func (s *script) takeBackup() error {
func (s *script) createArchive() error {
backupSources := s.c.BackupSources
if s.c.BackupFromSnapshot {
@@ -370,13 +400,13 @@ func (s *script) takeBackup() error {
"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use `exec-pre` and `exec-post` commands to prepare your backup sources. Refer to the README for an upgrade guide.",
"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the README for an upgrade guide.",
)
backupSources = filepath.Join("/tmp", s.c.BackupSources)
// Copying before compressing guards against a situation where the backup folder's contents are still growing.
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(backupSources); err != nil {
return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
return fmt.Errorf("createArchive: error removing snapshot: %w", err)
}
s.logger.Infof("Removed snapshot `%s`.", backupSources)
return nil
@@ -385,7 +415,7 @@ func (s *script) takeBackup() error {
PreserveTimes: true,
PreserveOwner: true,
}); err != nil {
return fmt.Errorf("takeBackup: error creating snapshot: %w", err)
return fmt.Errorf("createArchive: error creating snapshot: %w", err)
}
s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
}
@@ -393,7 +423,7 @@ func (s *script) takeBackup() error {
tarFile := s.file
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(tarFile); err != nil {
return fmt.Errorf("takeBackup: error removing tar file: %w", err)
return fmt.Errorf("createArchive: error removing tar file: %w", err)
}
s.logger.Infof("Removed tar file `%s`.", tarFile)
return nil
@@ -401,7 +431,7 @@ func (s *script) takeBackup() error {
backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
if err != nil {
return fmt.Errorf("takeBackup: error getting absolute path: %w", err)
return fmt.Errorf("createArchive: error getting absolute path: %w", err)
}
var filesEligibleForBackup []string
@@ -416,21 +446,21 @@ func (s *script) takeBackup() error {
filesEligibleForBackup = append(filesEligibleForBackup, path)
return nil
}); err != nil {
return fmt.Errorf("compress: error walking filesystem tree: %w", err)
return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
}
if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
}
s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
return nil
}
// encryptBackup encrypts the backup file using PGP and the configured passphrase.
// encryptArchive encrypts the backup file using PGP and the configured passphrase.
// In case no passphrase is given it returns early, leaving the backup file
// untouched.
func (s *script) encryptBackup() error {
func (s *script) encryptArchive() error {
if s.c.GpgPassphrase == "" {
return nil
}
@@ -438,35 +468,35 @@ func (s *script) encryptBackup() error {
gpgFile := fmt.Sprintf("%s.gpg", s.file)
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(gpgFile); err != nil {
return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
}
s.logger.Infof("Removed GPG file `%s`.", gpgFile)
return nil
})
outFile, err := os.Create(gpgFile)
defer outFile.Close()
if err != nil {
return fmt.Errorf("encryptBackup: error opening out file: %w", err)
return fmt.Errorf("encryptArchive: error opening out file: %w", err)
}
defer outFile.Close()
_, name := path.Split(s.file)
dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
IsBinary: true,
FileName: name,
}, nil)
defer dst.Close()
if err != nil {
return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
return fmt.Errorf("encryptArchive: error encrypting backup file: %w", err)
}
defer dst.Close()
src, err := os.Open(s.file)
if err != nil {
return fmt.Errorf("encryptBackup: error opening backup file `%s`: %w", s.file, err)
return fmt.Errorf("encryptArchive: error opening backup file `%s`: %w", s.file, err)
}
if _, err := io.Copy(dst, src); err != nil {
return fmt.Errorf("encryptBackup: error writing ciphertext to file: %w", err)
return fmt.Errorf("encryptArchive: error writing ciphertext to file: %w", err)
}
s.file = gpgFile
@@ -474,12 +504,12 @@ func (s *script) encryptBackup() error {
return nil
}
// copyBackup makes sure the backup file is copied to both local and remote locations
// copyArchive makes sure the backup file is copied to both local and remote locations
// as per the given configuration.
func (s *script) copyBackup() error {
func (s *script) copyArchive() error {
_, name := path.Split(s.file)
if stat, err := os.Stat(s.file); err != nil {
return fmt.Errorf("copyBackup: unable to stat backup file: %w", err)
return fmt.Errorf("copyArchive: unable to stat backup file: %w", err)
} else {
size := stat.Size()
s.stats.BackupFile = BackupFileStats{
@@ -489,45 +519,17 @@ func (s *script) copyBackup() error {
}
}
if s.minioClient != nil {
if _, err := s.minioClient.FPutObject(context.Background(), s.c.AwsS3BucketName, filepath.Join(s.c.AwsS3Path, name), s.file, minio.PutObjectOptions{
ContentType: "application/tar+gzip",
}); err != nil {
return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
eg := errgroup.Group{}
for _, backend := range s.storages {
b := backend
eg.Go(func() error {
return b.Copy(s.file)
})
}
s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName)
if err := eg.Wait(); err != nil {
return fmt.Errorf("copyArchive: error copying archive: %w", err)
}
if s.webdavClient != nil {
bytes, err := os.ReadFile(s.file)
if err != nil {
return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
}
if err := s.webdavClient.MkdirAll(s.c.WebdavPath, 0644); err != nil {
return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", s.c.WebdavPath, err)
}
if err := s.webdavClient.Write(filepath.Join(s.c.WebdavPath, name), bytes, 0644); err != nil {
return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err)
}
s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath)
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil {
return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
}
s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive)
if s.c.BackupLatestSymlink != "" {
symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink)
if _, err := os.Lstat(symlink); err == nil {
os.Remove(symlink)
}
if err := os.Symlink(name, symlink); err != nil {
return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
}
s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink)
}
}
return nil
}
@@ -541,177 +543,28 @@ func (s *script) pruneBackups() error {
deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)
// doPrune holds general control flow that applies to any kind of storage.
// Callers can pass in a thunk that performs the actual deletion of files.
var doPrune = func(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
if lenMatches != 0 && lenMatches != lenCandidates {
if err := doRemoveFiles(); err != nil {
eg := errgroup.Group{}
for _, backend := range s.storages {
b := backend
eg.Go(func() error {
stats, err := b.Prune(deadline, s.c.BackupPruningPrefix)
if err != nil {
return err
}
s.logger.Infof(
"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
lenMatches,
lenCandidates,
description,
s.c.BackupRetentionDays,
)
} else if lenMatches != 0 && lenMatches == lenCandidates {
s.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description)
s.logger.Warn("Refusing to do so, please check your configuration.")
} else {
s.logger.Infof("None of %d existing %s were pruned.", lenCandidates, description)
}
return nil
}
if s.minioClient != nil {
candidates := s.minioClient.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{
WithMetadata: true,
Prefix: filepath.Join(s.c.AwsS3Path, s.c.BackupPruningPrefix),
Recursive: true,
})
var matches []minio.ObjectInfo
var lenCandidates int
for candidate := range candidates {
lenCandidates++
if candidate.Err != nil {
return fmt.Errorf(
"pruneBackups: error looking up candidates from remote storage: %w",
candidate.Err,
)
}
if candidate.LastModified.Before(deadline) {
matches = append(matches, candidate)
}
}
s.stats.Storages.S3 = StorageStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
doPrune(len(matches), lenCandidates, "remote backup(s)", func() error {
objectsCh := make(chan minio.ObjectInfo)
go func() {
for _, match := range matches {
objectsCh <- match
}
close(objectsCh)
}()
errChan := s.minioClient.RemoveObjects(context.Background(), s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{})
var removeErrors []error
for result := range errChan {
if result.Err != nil {
removeErrors = append(removeErrors, result.Err)
}
}
if len(removeErrors) != 0 {
return join(removeErrors...)
s.stats.Lock()
s.stats.Storages[b.Name()] = StorageStats{
Total: stats.Total,
Pruned: stats.Pruned,
}
s.stats.Unlock()
return nil
})
}
if s.webdavClient != nil {
candidates, err := s.webdavClient.ReadDir(s.c.WebdavPath)
if err != nil {
return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err)
}
var matches []fs.FileInfo
var lenCandidates int
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) {
continue
}
lenCandidates++
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
if err := eg.Wait(); err != nil {
return fmt.Errorf("pruneBackups: error pruning backups: %w", err)
}
s.stats.Storages.WebDAV = StorageStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error {
for _, match := range matches {
if err := s.webdavClient.Remove(filepath.Join(s.c.WebdavPath, match.Name())); err != nil {
return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err)
}
}
return nil
})
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
globPattern := path.Join(
s.c.BackupArchive,
fmt.Sprintf("%s*", s.c.BackupPruningPrefix),
)
globMatches, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf(
"pruneBackups: error looking up matching files using pattern %s: %w",
globPattern,
err,
)
}
var candidates []string
for _, candidate := range globMatches {
fi, err := os.Lstat(candidate)
if err != nil {
return fmt.Errorf(
"pruneBackups: error calling Lstat on file %s: %w",
candidate,
err,
)
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
candidates = append(candidates, candidate)
}
}
var matches []string
for _, candidate := range candidates {
fi, err := os.Stat(candidate)
if err != nil {
return fmt.Errorf(
"pruneBackups: error calling stat on file %s: %w",
candidate,
err,
)
}
if fi.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
s.stats.Storages.Local = StorageStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
doPrune(len(matches), len(candidates), "local backup(s)", func() error {
var removeErrors []error
for _, match := range matches {
if err := os.Remove(match); err != nil {
removeErrors = append(removeErrors, err)
}
}
if len(removeErrors) != 0 {
return fmt.Errorf(
"pruneBackups: %d error(s) deleting local files, starting with: %w",
len(removeErrors),
join(removeErrors...),
)
}
return nil
})
}
return nil
}


@@ -5,6 +5,7 @@ package main
import (
"bytes"
"sync"
"time"
)
@@ -30,15 +31,9 @@ type StorageStats struct {
PruneErrors uint
}
// StoragesStats stats about each possible archival location (Local, WebDAV, S3)
type StoragesStats struct {
Local StorageStats
WebDAV StorageStats
S3 StorageStats
}
// Stats global stats regarding script execution
type Stats struct {
sync.Mutex
StartTime time.Time
EndTime time.Time
TookTime time.Duration
@@ -46,5 +41,5 @@ type Stats struct {
LogOutput *bytes.Buffer
Containers ContainersStats
BackupFile BackupFileStats
Storages StoragesStats
Storages map[string]StorageStats
}
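Moving `Storages` from a fixed struct to a `map[string]StorageStats` keyed by backend name keeps existing notification templates working, because Go's text/template resolves `.Storages.S3` as a map index just as it previously resolved a struct field. A self-contained sketch:

package main

import (
	"os"
	"text/template"
)

type StorageStats struct{ Total, Pruned uint }

func main() {
	// Same shape as the Stats.Storages field after this change.
	storages := map[string]StorageStats{"S3": {Total: 4, Pruned: 1}}
	tmpl := template.Must(template.New("t").Parse(
		"S3: {{ .Storages.S3.Pruned }}/{{ .Storages.S3.Total }} pruned\n"))
	// .Storages.S3 indexes the map; .Pruned and .Total are struct fields.
	if err := tmpl.Execute(os.Stdout, map[string]any{"Storages": storages}); err != nil {
		panic(err)
	}
}

The newly embedded sync.Mutex is what lets the per-backend goroutines in pruneBackups above write their stats concurrently via s.stats.Lock().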


@@ -5,51 +5,13 @@ package main
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"strings"
)
var noop = func() error { return nil }
// copyFile creates a copy of the file located at `src` at `dst`.
func copyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
_, err = io.Copy(out, in)
if err != nil {
out.Close()
return err
}
return out.Close()
}
// join takes a list of errors and joins them into a single error
func join(errs ...error) error {
if len(errs) == 1 {
return errs[0]
}
var msgs []string
for _, err := range errs {
if err == nil {
continue
}
msgs = append(msgs, err.Error())
}
return errors.New("[" + strings.Join(msgs, ", ") + "]")
}
// remove removes the given file or directory from disk.
func remove(location string) error {
fi, err := os.Lstat(location)
@@ -84,7 +46,7 @@ type bufferingWriter struct {
func (b *bufferingWriter) Write(p []byte) (n int, err error) {
if n, err := b.buf.Write(p); err != nil {
return n, fmt.Errorf("bufferingWriter: error writing to buffer: %w", err)
return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
}
return b.writer.Write(p)
}
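With the hand-rolled `join` helper gone, error aggregation relies on `errors.Join` from the standard library, which additionally drops nil entries, returns nil when every argument is nil, and keeps the joined errors matchable. A quick illustration of that behavior:

package main

import (
	"errors"
	"fmt"
)

func main() {
	stopErr := errors.New("stop failed")
	joined := errors.Join(stopErr, nil, errors.New("remove failed"))
	fmt.Println(joined)                     // prints both messages on separate lines; the nil entry is dropped
	fmt.Println(errors.Is(joined, stopErr)) // true: joined errors remain matchable
	fmt.Println(errors.Join(nil, nil))      // <nil>: all-nil input yields no error
}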


@@ -25,15 +25,16 @@ Here is a list of all data passed to the template:
* `FullPath`: full path of the backup file (e.g. `/archive/backup-2022-02-11T01-00-00.tar.gz`)
* `Size`: size in bytes of the backup file
* `Storages`: object that holds stats about each storage
* `Local`, `S3` or `WebDAV`:
* `Local`, `S3`, `WebDAV`, `Azure` or `SSH`:
* `Total`: total number of backup files
* `Pruned`: number of backup files that were deleted due to the pruning rules
* `PruneErrors`: number of backup files that could not be pruned
## Functions
Some formatting functions are also available:
Some formatting and helper functions are also available:
* `formatTime`: formats a time object using [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339) format (e.g. `2022-02-11T01:00:00Z`)
* `formatBytesBin`: formats an amount of bytes using powers of 1024 (e.g. `7055258` bytes will be `6.7 MiB`)
* `formatBytesDec`: formats an amount of bytes using powers of 1000 (e.g. `7055258` bytes will be `7.1 MB`)
* `env`: returns the value of the environment variable of the given key if set
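For example, a custom body template could combine these helpers (a sketch: the template name and data paths are illustrative, and `GRAFANA_URL` is a hypothetical environment variable):

{{ define "body_custom" -}}
Backup finished at {{ formatTime .Stats.EndTime }}, size {{ formatBytesDec .Stats.BackupFile.Size }}.
S3: pruned {{ .Stats.Storages.S3.Pruned }} of {{ .Stats.Storages.S3.Total }} file(s).
Dashboard: {{ env "GRAFANA_URL" }}
{{- end }}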


@@ -13,6 +13,7 @@ if [ ! -d "/etc/dockervolumebackup/conf.d" ]; then
else
echo "/etc/dockervolumebackup/conf.d was found, using configuration files from this directory."
crontab -r && crontab /dev/null
for file in /etc/dockervolumebackup/conf.d/*; do
source "$file"
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
@@ -21,5 +22,12 @@ else
done
fi
if [ ! -z "$SERVE_METRICS_PATH" ]; then
mkdir -p /var/www/html${SERVE_METRICS_PATH}
echo "ok" > /var/www/html${SERVE_METRICS_PATH}/metrics.txt
httpd -h /var/www/html -p "${SERVE_METRICS_PORT:-80}"
echo "Serving metrics on port ${SERVE_METRICS_PORT:-80}."
fi
echo "Starting cron in foreground."
crond -f -l 8
crond -f -d 8

go.mod

@@ -1,60 +1,61 @@
module github.com/offen/docker-volume-backup
go 1.18
go 1.19
require (
github.com/containrrr/shoutrrr v0.5.2
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0
github.com/containrrr/shoutrrr v0.7.1
github.com/cosiner/argv v0.1.0
github.com/docker/docker v20.10.11+incompatible
github.com/docker/docker v24.0.5+incompatible
github.com/gofrs/flock v0.8.1
github.com/kelseyhightower/envconfig v1.4.0
github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
github.com/minio/minio-go/v7 v7.0.16
github.com/otiai10/copy v1.7.0
github.com/sirupsen/logrus v1.8.1
github.com/studio-b12/gowebdav v0.0.0-20220128162035-c7b1ff8a5e62
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
github.com/minio/minio-go/v7 v7.0.61
github.com/otiai10/copy v1.11.0
github.com/pkg/sftp v1.13.5
github.com/sirupsen/logrus v1.9.3
github.com/studio-b12/gowebdav v0.9.0
golang.org/x/crypto v0.11.0
golang.org/x/sync v0.3.0
)
require (
github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/containerd/containerd v1.5.5 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/fatih/color v1.10.0 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.0 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.13.6 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/nxadm/tail v1.4.6 // indirect
github.com/onsi/ginkgo v1.14.2 // indirect
github.com/onsi/gomega v1.10.3 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/rs/xid v1.3.0 // indirect
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect
google.golang.org/grpc v1.33.2 // indirect
google.golang.org/protobuf v1.26.0 // indirect
gopkg.in/ini.v1 v1.65.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
github.com/rs/xid v1.5.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect
)

go.sum

File diff suppressed because it is too large.


@@ -0,0 +1,160 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package azure
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"text/template"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/offen/docker-volume-backup/internal/storage"
)
type azureBlobStorage struct {
*storage.StorageBackend
client *azblob.Client
containerName string
}
// Config contains values that define the configuration of an Azure Blob Storage.
type Config struct {
AccountName string
ContainerName string
PrimaryAccountKey string
Endpoint string
RemotePath string
}
// NewStorageBackend creates and initializes a new Azure Blob Storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
endpointTemplate, err := template.New("endpoint").Parse(opts.Endpoint)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error parsing endpoint template: %w", err)
}
var ep bytes.Buffer
if err := endpointTemplate.Execute(&ep, opts); err != nil {
return nil, fmt.Errorf("NewStorageBackend: error executing endpoint template: %w", err)
}
normalizedEndpoint := fmt.Sprintf("%s/", strings.TrimSuffix(ep.String(), "/"))
var client *azblob.Client
if opts.PrimaryAccountKey != "" {
cred, err := azblob.NewSharedKeyCredential(opts.AccountName, opts.PrimaryAccountKey)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating shared key Azure credential: %w", err)
}
client, err = azblob.NewClientWithSharedKeyCredential(normalizedEndpoint, cred, nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
}
} else {
cred, err := azidentity.NewManagedIdentityCredential(nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating managed identity credential: %w", err)
}
client, err = azblob.NewClient(normalizedEndpoint, cred, nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
}
}
storage := azureBlobStorage{
client: client,
containerName: opts.ContainerName,
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
}
return &storage, nil
}
// Name returns the name of the storage backend
func (b *azureBlobStorage) Name() string {
return "Azure"
}
// Copy copies the given file to the storage backend.
func (b *azureBlobStorage) Copy(file string) error {
fileReader, err := os.Open(file)
if err != nil {
return fmt.Errorf("(*azureBlobStorage).Copy: error opening file %s: %w", file, err)
}
_, err = b.client.UploadStream(
context.Background(),
b.containerName,
filepath.Join(b.DestinationPath, filepath.Base(file)),
fileReader,
nil,
)
if err != nil {
return fmt.Errorf("(*azureBlobStorage).Copy: error uploading file %s: %w", file, err)
}
return nil
}
// Prune rotates away backups according to the configuration and provided
// deadline for the Azure Blob storage backend.
func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
lookupPrefix := filepath.Join(b.DestinationPath, pruningPrefix)
pager := b.client.NewListBlobsFlatPager(b.containerName, &container.ListBlobsFlatOptions{
Prefix: &lookupPrefix,
})
var matches []string
var totalCount uint
for pager.More() {
resp, err := pager.NextPage(context.Background())
if err != nil {
return nil, fmt.Errorf("(*azureBlobStorage).Prune: error paging over blobs: %w", err)
}
for _, v := range resp.Segment.BlobItems {
totalCount++
if v.Properties.LastModified.Before(deadline) {
matches = append(matches, *v.Name)
}
}
}
stats := storage.PruneStats{
Total: totalCount,
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), int(totalCount), "Azure Blob Storage backup(s)", func() error {
wg := sync.WaitGroup{}
wg.Add(len(matches))
var mu sync.Mutex // guards errs, which the deletion goroutines below append to concurrently
var errs []error
for _, match := range matches {
name := match
go func() {
_, err := b.client.DeleteBlob(context.Background(), b.containerName, name, nil)
if err != nil {
mu.Lock()
errs = append(errs, err)
mu.Unlock()
}
wg.Done()
}()
}
wg.Wait()
if len(errs) != 0 {
return errors.Join(errs...)
}
return nil
}); err != nil {
return &stats, err
}
return &stats, nil
}


@@ -0,0 +1,160 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package local
import (
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
)
type localStorage struct {
*storage.StorageBackend
latestSymlink string
}
// Config allows configuration of a local storage backend.
type Config struct {
ArchivePath string
LatestSymlink string
}
// NewStorageBackend creates and initializes a new local storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) storage.Backend {
return &localStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.ArchivePath,
Log: logFunc,
},
latestSymlink: opts.LatestSymlink,
}
}
// Name returns the name of the storage backend
func (b *localStorage) Name() string {
return "Local"
}
// Copy copies the given file to the local storage backend.
func (b *localStorage) Copy(file string) error {
_, name := path.Split(file)
if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
return fmt.Errorf("(*localStorage).Copy: Error copying file to local archive: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
if b.latestSymlink != "" {
symlink := path.Join(b.DestinationPath, b.latestSymlink)
if _, err := os.Lstat(symlink); err == nil {
os.Remove(symlink)
}
if err := os.Symlink(name, symlink); err != nil {
return fmt.Errorf("(*localStorage).Copy: error creating latest symlink: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
}
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the local storage backend.
func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
globPattern := path.Join(
b.DestinationPath,
fmt.Sprintf("%s*", pruningPrefix),
)
globMatches, err := filepath.Glob(globPattern)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error looking up matching files using pattern %s: %w",
globPattern,
err,
)
}
var candidates []string
for _, candidate := range globMatches {
fi, err := os.Lstat(candidate)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error calling Lstat on file %s: %w",
candidate,
err,
)
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
candidates = append(candidates, candidate)
}
}
var matches []string
for _, candidate := range candidates {
fi, err := os.Stat(candidate)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error calling stat on file %s: %w",
candidate,
err,
)
}
if fi.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error {
var removeErrors []error
for _, match := range matches {
if err := os.Remove(match); err != nil {
removeErrors = append(removeErrors, err)
}
}
if len(removeErrors) != 0 {
return fmt.Errorf(
"(*localStorage).Prune: %d error(s) deleting local files, starting with: %w",
len(removeErrors),
errors.Join(removeErrors...),
)
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}
// copyFile creates a copy of the file located at `src` at `dst`.
func copyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
_, err = io.Copy(out, in)
if err != nil {
out.Close()
return err
}
return out.Close()
}

internal/storage/s3/s3.go

@@ -0,0 +1,189 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package s3
import (
"context"
"crypto/x509"
"errors"
"fmt"
"os"
"path"
"path/filepath"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/offen/docker-volume-backup/internal/storage"
)
type s3Storage struct {
*storage.StorageBackend
client *minio.Client
bucket string
storageClass string
partSize int64
}
// Config contains values that define the configuration of a S3 backend.
type Config struct {
Endpoint string
AccessKeyID string
SecretAccessKey string
IamRoleEndpoint string
EndpointProto string
EndpointInsecure bool
RemotePath string
BucketName string
StorageClass string
PartSize int64
CACert *x509.Certificate
}
// NewStorageBackend creates and initializes a new S3/Minio storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
var creds *credentials.Credentials
if opts.AccessKeyID != "" && opts.SecretAccessKey != "" {
creds = credentials.NewStaticV4(
opts.AccessKeyID,
opts.SecretAccessKey,
"",
)
} else if opts.IamRoleEndpoint != "" {
creds = credentials.NewIAM(opts.IamRoleEndpoint)
} else {
return nil, errors.New("NewStorageBackend: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
}
options := minio.Options{
Creds: creds,
Secure: opts.EndpointProto == "https",
}
transport, err := minio.DefaultTransport(true)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: failed to create default minio transport: %w", err)
}
if opts.EndpointInsecure {
if !options.Secure {
return nil, errors.New("NewStorageBackend: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
}
transport.TLSClientConfig.InsecureSkipVerify = true
} else if opts.CACert != nil {
if transport.TLSClientConfig.RootCAs == nil {
transport.TLSClientConfig.RootCAs = x509.NewCertPool()
}
transport.TLSClientConfig.RootCAs.AddCert(opts.CACert)
}
options.Transport = transport
mc, err := minio.New(opts.Endpoint, &options)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error setting up minio client: %w", err)
}
return &s3Storage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: mc,
bucket: opts.BucketName,
storageClass: opts.StorageClass,
partSize: opts.PartSize,
}, nil
}
// Name returns the name of the storage backend
func (v *s3Storage) Name() string {
return "S3"
}
// Copy copies the given file to the S3/Minio storage backend.
func (b *s3Storage) Copy(file string) error {
_, name := path.Split(file)
putObjectOptions := minio.PutObjectOptions{
ContentType: "application/tar+gzip",
StorageClass: b.storageClass,
}
if b.partSize > 0 {
srcFileInfo, err := os.Stat(file)
if err != nil {
return fmt.Errorf("(*s3Storage).Copy: error reading the local file: %w", err)
}
_, partSize, _, err := minio.OptimalPartInfo(srcFileInfo.Size(), uint64(b.partSize*1024*1024))
if err != nil {
return fmt.Errorf("(*s3Storage).Copy: error computing the optimal s3 part size: %w", err)
}
putObjectOptions.PartSize = uint64(partSize)
}
if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
}
return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend.
func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates := b.client.ListObjects(context.Background(), b.bucket, minio.ListObjectsOptions{
Prefix: filepath.Join(b.DestinationPath, pruningPrefix),
Recursive: true,
})
var matches []minio.ObjectInfo
var lenCandidates int
for candidate := range candidates {
lenCandidates++
if candidate.Err != nil {
return nil, fmt.Errorf(
"(*s3Storage).Prune: Error looking up candidates from remote storage! %w",
candidate.Err,
)
}
if candidate.LastModified.Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error {
objectsCh := make(chan minio.ObjectInfo)
go func() {
for _, match := range matches {
objectsCh <- match
}
close(objectsCh)
}()
errChan := b.client.RemoveObjects(context.Background(), b.bucket, objectsCh, minio.RemoveObjectsOptions{})
var removeErrors []error
for result := range errChan {
if result.Err != nil {
removeErrors = append(removeErrors, result.Err)
}
}
if len(removeErrors) != 0 {
return errors.Join(removeErrors...)
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}
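The part size handling in Copy above turns the configured AWS_PART_SIZE (given in MiB) into a concrete multipart layout through minio-go's OptimalPartInfo, which keeps the requested size when it fits S3's limit of 10,000 parts per upload and grows it otherwise. A standalone sketch of what that call computes:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	// A 32 GiB archive with AWS_PART_SIZE=16, i.e. 16 MiB parts, matching the
	// uint64(b.partSize*1024*1024) conversion above.
	objectSize := int64(32) << 30
	configuredPartSize := uint64(16) << 20
	totalParts, partSize, lastPartSize, err := minio.OptimalPartInfo(objectSize, configuredPartSize)
	if err != nil {
		panic(err)
	}
	// 2048 parts of 16 MiB each stay well under the 10,000 part limit,
	// so the configured size is kept as-is.
	fmt.Println(totalParts, partSize, lastPartSize)
}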

internal/storage/ssh/ssh.go

@@ -0,0 +1,190 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package ssh
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
type sshStorage struct {
*storage.StorageBackend
client *ssh.Client
sftpClient *sftp.Client
hostName string
}
// Config allows configuring an SSH backend.
type Config struct {
HostName string
Port string
User string
Password string
IdentityFile string
IdentityPassphrase string
RemotePath string
}
// NewStorageBackend creates and initializes a new SSH storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
var authMethods []ssh.AuthMethod
if opts.Password != "" {
authMethods = append(authMethods, ssh.Password(opts.Password))
}
if _, err := os.Stat(opts.IdentityFile); err == nil {
key, err := ioutil.ReadFile(opts.IdentityFile)
if err != nil {
return nil, errors.New("NewStorageBackend: error reading the private key")
}
var signer ssh.Signer
if opts.IdentityPassphrase != "" {
signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(opts.IdentityPassphrase))
if err != nil {
return nil, errors.New("NewStorageBackend: error parsing the encrypted private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
} else {
signer, err = ssh.ParsePrivateKey(key)
if err != nil {
return nil, errors.New("NewStorageBackend: error parsing the private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
}
}
sshClientConfig := &ssh.ClientConfig{
User: opts.User,
Auth: authMethods,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: Error creating ssh client: %w", err)
}
_, _, err = sshClient.SendRequest("keepalive", false, nil)
if err != nil {
return nil, err
}
sftpClient, err := sftp.NewClient(sshClient)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating sftp client: %w", err)
}
return &sshStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: sshClient,
sftpClient: sftpClient,
hostName: opts.HostName,
}, nil
}
// Name returns the name of the storage backend
func (b *sshStorage) Name() string {
return "SSH"
}
// Copy copies the given file to the SSH storage backend.
func (b *sshStorage) Copy(file string) error {
source, err := os.Open(file)
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error opening the file to be uploaded: %w", err)
}
defer source.Close()
_, name := path.Split(file)
destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error creating file on SSH storage: %w", err)
}
defer destination.Close()
chunk := make([]byte, 1000000)
for {
num, err := source.Read(chunk)
if err == io.EOF {
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
if tot != len(chunk[:num]) {
return errors.New("(*sshStorage).Copy: failed to write stream")
}
break
}
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
if tot != len(chunk[:num]) {
return fmt.Errorf("(*sshStorage).Copy: failed to write stream")
}
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the SSH storage backend.
func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
if err != nil {
return nil, fmt.Errorf("(*sshStorage).Prune: Error reading directory from SSH storage: %w", err)
}
var matches []string
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
continue
}
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate.Name())
}
}
stats := &storage.PruneStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error {
for _, match := range matches {
if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
return fmt.Errorf("(*sshStorage).Prune: Error removing file from SSH storage: %w", err)
}
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}


@@ -0,0 +1,61 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package storage
import (
"time"
)
// Backend is an interface for defining functions which all storage providers support.
type Backend interface {
Copy(file string) error
Prune(deadline time.Time, pruningPrefix string) (*PruneStats, error)
Name() string
}
// StorageBackend is a generic storage type, holding the properties common to all storage implementations.
type StorageBackend struct {
DestinationPath string
RetentionDays int
Log Log
}
type LogLevel int
const (
LogLevelInfo LogLevel = iota
LogLevelWarning
LogLevelError
)
type Log func(logType LogLevel, context string, msg string, params ...any)
// PruneStats is a wrapper struct for returning stats after pruning
type PruneStats struct {
Total uint
Pruned uint
}
// DoPrune holds general control flow that applies to any kind of storage.
// Callers can pass in a thunk that performs the actual deletion of files.
func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
if lenMatches != 0 && lenMatches != lenCandidates {
if err := doRemoveFiles(); err != nil {
return err
}
b.Log(LogLevelInfo, context,
"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
lenMatches,
lenCandidates,
description,
b.RetentionDays,
)
} else if lenMatches != 0 && lenMatches == lenCandidates {
b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing %s.", lenMatches, description)
b.Log(LogLevelWarning, context, "Refusing to do so, please check your configuration.")
} else {
b.Log(LogLevelInfo, context, "None of %d existing %s were pruned.", lenCandidates, description)
}
return nil
}
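Everything a new storage target needs is this three-method interface plus the shared DoPrune control flow. A minimal sketch of a hypothetical no-op backend (the `discard` package and its names are illustrative, not part of this change):

package discard

import (
	"time"

	"github.com/offen/docker-volume-backup/internal/storage"
)

type discardStorage struct {
	*storage.StorageBackend
}

// NewStorageBackend creates a backend that accepts every file and stores nothing.
func NewStorageBackend(logFunc storage.Log) storage.Backend {
	return &discardStorage{
		StorageBackend: &storage.StorageBackend{Log: logFunc},
	}
}

// Name returns the name of the storage backend.
func (d *discardStorage) Name() string {
	return "Discard"
}

// Copy only logs the call instead of storing the file.
func (d *discardStorage) Copy(file string) error {
	d.Log(storage.LogLevelInfo, d.Name(), "Discarded copy of `%s`.", file)
	return nil
}

// Prune reports zero candidates, so DoPrune takes its "nothing pruned" branch.
func (d *discardStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	stats := &storage.PruneStats{Total: 0, Pruned: 0}
	return stats, d.DoPrune(d.Name(), 0, 0, "discarded backup(s)", func() error { return nil })
}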


@@ -0,0 +1,123 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package webdav
import (
"errors"
"fmt"
"io/fs"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/studio-b12/gowebdav"
)
type webDavStorage struct {
*storage.StorageBackend
client *gowebdav.Client
url string
}
// Config allows configuring a WebDAV storage backend.
type Config struct {
URL string
RemotePath string
Username string
Password string
URLInsecure bool
}
// NewStorageBackend creates and initializes a new WebDav storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
if opts.Username == "" || opts.Password == "" {
return nil, errors.New("NewStorageBackend: WEBDAV_URL is defined, but no credentials were provided")
} else {
webdavClient := gowebdav.NewClient(opts.URL, opts.Username, opts.Password)
if opts.URLInsecure {
defaultTransport, ok := http.DefaultTransport.(*http.Transport)
if !ok {
return nil, errors.New("NewStorageBackend: unexpected error when asserting type for http.DefaultTransport")
}
webdavTransport := defaultTransport.Clone()
webdavTransport.TLSClientConfig.InsecureSkipVerify = opts.URLInsecure
webdavClient.SetTransport(webdavTransport)
}
return &webDavStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: webdavClient,
url: opts.URL, // set url so the Copy log line reports the actual WebDAV URL
}, nil
}
}
// Name returns the name of the storage backend
func (b *webDavStorage) Name() string {
return "WebDAV"
}
// Copy copies the given file to the WebDav storage backend.
func (b *webDavStorage) Copy(file string) error {
_, name := path.Split(file)
if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error creating directory '%s' on WebDAV server: %w", b.DestinationPath, err)
}
r, err := os.Open(file)
if err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error opening the file to be uploaded: %w", err)
}
if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error uploading the file to WebDAV server: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to WebDAV URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the WebDav storage backend.
func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.client.ReadDir(b.DestinationPath)
if err != nil {
return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
}
var matches []fs.FileInfo
var lenCandidates int
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
continue
}
lenCandidates++
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error {
for _, match := range matches {
if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
return fmt.Errorf("(*webDavStorage).Prune: Error removing file from WebDAV storage: %w", err)
}
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}


@@ -0,0 +1,58 @@
version: '3'
services:
storage:
image: mcr.microsoft.com/azure-storage/azurite
volumes:
- azurite_backup_data:/data
command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data
healthcheck:
test: nc 127.0.0.1 10000 -z
interval: 1s
retries: 30
az_cli:
image: mcr.microsoft.com/azure-cli
volumes:
- ./local:/dump
command:
- /bin/sh
- -c
- |
az storage container create --name test-container
depends_on:
storage:
condition: service_healthy
environment:
AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://storage:10000/devstoreaccount1;
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
restart: always
environment:
AZURE_STORAGE_ACCOUNT_NAME: devstoreaccount1
AZURE_STORAGE_PRIMARY_ACCOUNT_KEY: Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
AZURE_STORAGE_CONTAINER_NAME: test-container
AZURE_STORAGE_ENDPOINT: http://storage:10000/{{ .AccountName }}/
AZURE_STORAGE_PATH: 'path/to/backup'
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
azurite_backup_data:
name: azurite_backup_data
app_data:

test/azure/run.sh

@@ -0,0 +1,40 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 5
# Trigger a manual backup run so its results can be inspected right away.
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker compose run --rm az_cli \
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
pass "Found relevant files in untared remote backups."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker compose run --rm az_cli \
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
test -f ./local/test.tar.gz
pass "Remote backups have not been deleted."
docker compose down --volumes


@@ -0,0 +1,48 @@
version: '3'
services:
minio:
hostname: minio.local
image: minio/minio:RELEASE.2020-08-04T23-10-51Z
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: test
MINIO_ACCESS_KEY: test
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server --certs-dir "/certs" --address ":443" /data'
volumes:
- minio_backup_data:/data
- ./minio.crt:/certs/public.crt
- ./minio.key:/certs/private.key
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
depends_on:
- minio
restart: always
environment:
BACKUP_FILENAME: test.tar.gz
AWS_ACCESS_KEY_ID: test
AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
AWS_ENDPOINT: minio.local:443
AWS_ENDPOINT_CA_CERT: /root/minio-rootCA.crt
AWS_S3_BUCKET_NAME: backup
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ./rootCA.crt:/root/minio-rootCA.crt
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
minio_backup_data:
name: minio_backup_data
app_data:

test/certs/run.sh

@@ -0,0 +1,43 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
openssl genrsa -des3 -passout pass:test -out rootCA.key 4096
openssl req -passin pass:test \
-subj "/C=DE/ST=BE/O=IntegrationTest, Inc." \
-x509 -new -key rootCA.key -sha256 -days 1 -out rootCA.crt
openssl genrsa -out minio.key 4096
openssl req -new -sha256 -key minio.key \
-subj "/C=DE/ST=BE/O=IntegrationTest, Inc./CN=minio" \
-out minio.csr
openssl x509 -req -passin pass:test \
-in minio.csr \
-CA rootCA.crt -CAkey rootCA.key -CAcreateserial \
-extfile san.cnf \
-out minio.crt -days 1 -sha256
openssl x509 -in minio.crt -noout -text
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'tar -xvf /minio_data/backup/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backups."
docker compose down --volumes

1
test/certs/san.cnf Normal file

@@ -0,0 +1 @@
subjectAltName = DNS:minio.local
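Note: to confirm the issued certificate actually picked up the SAN from san.cnf, the text dump the script already prints can be narrowed down, e.g.:

openssl x509 -in minio.crt -noout -text | grep -A1 'Subject Alternative Name'
# expected:
#     X509v3 Subject Alternative Name:
#         DNS:minio.local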


@@ -3,6 +3,8 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker network create test_network
docker volume create backup_data
@@ -46,21 +48,15 @@ docker run --rm \
--entrypoint backup \
offen/docker-volume-backup:${TEST_VERSION:-canary}
docker run --rm -it \
docker run --rm \
-v backup_data:/data alpine \
ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'
echo "[TEST:PASS] Found relevant files in untared remote backup."
pass "Found relevant files in untared remote backup."
# This test does not stop containers during backup. This is happening on
# purpose in order to cover this setup as well.
if [ "$(docker ps -q | wc -l)" != "2" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker ps
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
expect_running_containers "2"
docker rm $(docker stop minio offen)
docker volume rm backup_data app_data


@@ -10,12 +10,27 @@ services:
MARIADB_ROOT_PASSWORD: test
MARIADB_DATABASE: backup
labels:
# this is testing the deprecated label on purpose
- docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump -ptest --all-databases > /tmp/volume/dump.sql'
- docker-volume-backup.exec-post=/bin/sh -c 'echo "post" > /tmp/volume/post.txt'
- docker-volume-backup.copy-post=/bin/sh -c 'echo "post" > /tmp/volume/post.txt'
- docker-volume-backup.exec-label=test
volumes:
- app_data:/tmp/volume
other_database:
image: mariadb:10.7
deploy:
restart_policy:
condition: on-failure
environment:
MARIADB_ROOT_PASSWORD: test
MARIADB_DATABASE: backup
labels:
- docker-volume-backup.archive-pre=touch /tmp/volume/not-relevant.txt
- docker-volume-backup.exec-label=not-relevant
volumes:
- app_data:/tmp/volume
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
deploy:
@@ -27,10 +42,9 @@ services:
EXEC_LABEL: test
EXEC_FORWARD_OUTPUT: "true"
volumes:
- archive:/archive
- ./local:/archive
- app_data:/backup/data:ro
- /var/run/docker.sock:/var/run/docker.sock
volumes:
app_data:
archive:
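Note: the database service above deliberately keeps the deprecated exec-pre label, while other_database uses the current archive-pre naming. Going by the labels shown here (an assumption, not verified against further docs), the non-deprecated equivalent of the mysqldump hook would be:

labels:
  - docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump -ptest --all-databases > /tmp/volume/dump.sql'
  - docker-volume-backup.exec-label=test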


@@ -3,39 +3,45 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p ./local
docker-compose up -d
docker compose up -d
sleep 30 # mariadb likes to take a bit before responding
docker-compose exec backup backup
sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' commands_archive) ./local
docker compose exec backup backup
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/dump.sql ]; then
echo "[TEST:FAIL] Could not find file written by pre command."
exit 1
fail "Could not find file written by pre command."
fi
echo "[TEST:PASS] Found expected file."
pass "Found expected file."
if [ -f ./backup/data/not-relevant.txt ]; then
fail "Command ran for container with other label."
fi
pass "Command did not run for container with other label."
if [ -f ./backup/data/post.txt ]; then
echo "[TEST:FAIL] File created in post command was present in backup."
exit 1
fail "File created in post command was present in backup."
fi
echo "[TEST:PASS] Did not find unexpected file."
pass "Did not find unexpected file."
docker-compose down --volumes
docker compose down --volumes
sudo rm -rf ./local
echo "[TEST:INFO] Running commands test in swarm mode next."
info "Running commands test in swarm mode next."
mkdir -p ./local
docker swarm init
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z $(docker ps -q -f name=backup) ]; do
echo "[TEST:INFO] Backup container not ready yet. Retrying."
info "Backup container not ready yet. Retrying."
sleep 1
done
@@ -43,20 +49,16 @@ sleep 20
docker exec $(docker ps -q -f name=backup) backup
sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' test_stack_archive) ./local
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/dump.sql ]; then
echo "[TEST:FAIL] Could not find file written by pre command."
exit 1
fail "Could not find file written by pre command."
fi
echo "[TEST:PASS] Found expected file."
pass "Found expected file."
if [ -f ./backup/data/post.txt ]; then
echo "[TEST:FAIL] File created in post command was present in backup."
exit 1
fail "File created in post command was present in backup."
fi
echo "[TEST:PASS] Did not find unexpected file."
pass "Did not find unexpected file."
docker stack rm test_stack
docker swarm leave --force


@@ -1,68 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
mkdir -p local
docker-compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker-compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
docker-compose exec backup backup
sleep 5
if [ "$(docker-compose ps -q | wc -l)" != "4" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker-compose ps
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
docker run --rm -it \
-v compose_minio_backup_data:/minio_data \
-v compose_webdav_backup_data:/webdav_data alpine \
ash -c 'apk add gnupg && \
echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /minio_data/backup/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db && \
echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /webdav_data/data/my/new/path/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xvf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
echo "[TEST:PASS] Found relevant files in decrypted and untared remote backups."
echo 1234secret | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test-hostnametoken.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
rm ./local/decrypted.tar.gz
test -L /tmp/backup/app_data/db.link
echo "[TEST:PASS] Found relevant files in decrypted and untared local backup."
test -L ./local/test-hostnametoken.latest.tar.gz.gpg
echo "[TEST:PASS] Found symlink to latest version in local backup."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5
docker-compose exec backup backup
docker run --rm -it \
-v compose_minio_backup_data:/minio_data \
-v compose_webdav_backup_data:/webdav_data alpine \
ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ] && \
[ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'
echo "[TEST:PASS] Remote backups have not been deleted."
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
echo "[TEST:FAIL] Backups should not have been deleted, instead seen:"
find ./local -type f
exit 1
fi
echo "[TEST:PASS] Local backups have not been deleted."
docker-compose down --volumes


@@ -3,30 +3,29 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker-compose up -d
docker compose up -d
# sleep until a backup is guaranteed to have happened on the 1 minute schedule
sleep 100
docker-compose down --volumes
docker compose down --volumes
if [ ! -f ./local/conf.tar.gz ]; then
echo "[TEST:FAIL] Config from file was not used."
exit 1
fail "Config from file was not used."
fi
echo "[TEST:PASS] Config from file was used."
pass "Config from file was used."
if [ ! -f ./local/other.tar.gz ]; then
echo "[TEST:FAIL] Run on same schedule did not succeed."
exit 1
fail "Run on same schedule did not succeed."
fi
echo "[TEST:PASS] Run on same schedule succeeded."
pass "Run on same schedule succeeded."
if [ -f ./local/never.tar.gz ]; then
echo "[TEST:FAIL] Unexpected file was found."
exit 1
fail "Unexpected file was found."
fi
echo "[TEST:PASS] Unexpected cron did not run."
pass "Unexpected cron did not run."

4
test/extend/Dockerfile Normal file

@@ -0,0 +1,4 @@
ARG version=canary
FROM offen/docker-volume-backup:$version
RUN apk add rsync
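Note: declaring ARG version before FROM makes the base image tag selectable at build time; run.sh below passes it via --build-arg. A standalone invocation would look like:

docker build -t offen/docker-volume-backup:canary-with-rsync --build-arg version=canary .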


@@ -0,0 +1,26 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
labels:
- docker-volume-backup.copy-post=/bin/sh -c 'mkdir -p /tmp/unpack && tar -xvf $$COMMAND_RUNTIME_ARCHIVE_FILEPATH -C /tmp/unpack && rsync -r /tmp/unpack/backup/app_data /local'
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
EXEC_FORWARD_OUTPUT: "true"
volumes:
- ./local:/local
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:
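Note: the doubled $$ in the copy-post label keeps compose from interpolating, so the container receives the literal variable reference; docker-volume-backup is expected to populate COMMAND_RUNTIME_ARCHIVE_FILEPATH with the path of the archive it just wrote (inferred from the label above, not from further docs). The command that effectively runs inside the container:

/bin/sh -c 'mkdir -p /tmp/unpack && tar -xvf $COMMAND_RUNTIME_ARCHIVE_FILEPATH -C /tmp/unpack && rsync -r /tmp/unpack/backup/app_data /local'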

29
test/extend/run.sh Normal file

@@ -0,0 +1,29 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
export BASE_VERSION="${TEST_VERSION:-canary}"
export TEST_VERSION="${TEST_VERSION:-canary}-with-rsync"
docker build . -t offen/docker-volume-backup:$TEST_VERSION --build-arg version=$BASE_VERSION
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "2"
if [ ! -f "./local/app_data/offen.db" ]; then
fail "Could not find expected file in untared archive."
fi
docker compose down --volumes


@@ -0,0 +1,26 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
environment:
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_FILENAME: test.tar.gz
BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
GPG_PASSPHRASE: 1234#$$ecret
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:
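Note: compose treats $$ as an escaped literal $, so the passphrase the container sees is 1234#$ecret, matching what run.sh below pipes to gpg:

# value received by the backup container:
GPG_PASSPHRASE='1234#$ecret'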

33
test/gpg/run.sh Executable file

@@ -0,0 +1,33 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
docker compose exec backup backup
expect_running_containers "2"
tmp_dir=$(mktemp -d)
echo "1234#\$ecret" | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C $tmp_dir
if [ ! -f $tmp_dir/backup/app_data/offen.db ]; then
fail "Could not find expected file in untared archive."
fi
rm ./local/decrypted.tar.gz
pass "Found relevant files in decrypted and untared local backup."
if [ ! -L ./local/test-latest.tar.gz.gpg ]; then
fail "Could not find local symlink to latest encrypted backup."
fi
docker compose down --volumes


@@ -3,25 +3,26 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker-compose up -d
docker compose up -d
sleep 5
docker-compose exec backup backup
docker compose exec backup backup
docker-compose down --volumes
docker compose down --volumes
out=$(mktemp -d)
sudo tar --same-owner -xvf ./local/test.tar.gz -C "$out"
if [ ! -f "$out/backup/data/me.txt" ]; then
echo "[TEST:FAIL] Expected file was not found."
exit 1
fail "Expected file was not found."
fi
echo "[TEST:PASS] Expected file was found."
pass "Expected file was found."
if [ -f "$out/backup/data/skip.me" ]; then
echo "[TEST:FAIL] Ignored file was found."
exit 1
fail "Ignored file was found."
fi
echo "[TEST:PASS] Ignored file was not found."
pass "Ignored file was not found."

1
test/local/.gitignore vendored Normal file

@@ -0,0 +1 @@
local


@@ -0,0 +1,29 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
restart: always
environment:
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ./local:/archive
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:
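Note: with BACKUP_FILENAME_EXPAND enabled, the $$HOSTNAME placeholder ($$ again escaping compose interpolation) expands at runtime; since the service pins hostname: hostnametoken, run.sh below can assert on deterministic names:

./local/test-hostnametoken.tar.gz              # the archive
./local/test-hostnametoken.latest.tar.gz.gpg   # the BACKUP_LATEST_SYMLINK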

55
test/local/run.sh Executable file

@@ -0,0 +1,55 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
docker compose exec backup backup
sleep 5
expect_running_containers "2"
tmp_dir=$(mktemp -d)
tar -xvf ./local/test-hostnametoken.tar.gz -C $tmp_dir
if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
fail "Could not find expected file in untared archive."
fi
rm -f ./local/test-hostnametoken.tar.gz
if [ ! -L "$tmp_dir/backup/app_data/db.link" ]; then
fail "Could not find expected symlink in untared archive."
fi
pass "Found relevant files in decrypted and untared local backup."
if [ ! -L ./local/test-hostnametoken.latest.tar.gz.gpg ]; then
fail "Could not find symlink to latest version."
fi
pass "Found symlink to latest version in local backup."
# The second part of this test checks that backups do not get deleted when the
# retention is set to 0 days (as that would mean deleting all existing backups)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
fail "Backups should not have been deleted, instead seen: "$(find ./local -type f)""
fi
pass "Local backups have not been deleted."
docker compose down --volumes


@@ -10,6 +10,7 @@ services:
BACKUP_PRUNING_PREFIX: test
NOTIFICATION_LEVEL: info
NOTIFICATION_URLS: ${NOTIFICATION_URLS}
EXTRA_VALUE: extra-value
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro


@@ -1,5 +1,5 @@
{{ define "title_success" -}}
Successful test run, yay!
Successful test run with {{ env "EXTRA_VALUE" }}, yay!
{{- end }}
{{ define "body_success" -}}
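Note: together with the EXTRA_VALUE variable added to the compose file above, this change shows that notification templates can read environment variables through the env template function; a hypothetical variation:

{{ define "title_success" -}}
Backup on {{ env "HOSTNAME" }} succeeded
{{- end }}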


@@ -3,50 +3,48 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker-compose up -d
docker compose up -d
sleep 5
GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')
echo "[TEST:INFO] Set up Gotify application using token $GOTIFY_TOKEN"
info "Set up Gotify application using token $GOTIFY_TOKEN"
docker-compose exec backup backup
docker compose exec backup backup
NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 0 ]; then
echo "[TEST:FAIL] Expected no notifications to be sent when not configured"
exit 1
fail "Expected no notifications to be sent when not configured"
fi
echo "[TEST:PASS] No notifications were sent when not configured."
pass "No notifications were sent when not configured."
docker-compose down
docker compose down
NOTIFICATION_URLS="gotify://gotify/${GOTIFY_TOKEN}?disableTLS=true" docker-compose up -d
NOTIFICATION_URLS="gotify://gotify/${GOTIFY_TOKEN}?disableTLS=true" docker compose up -d
docker-compose exec backup backup
docker compose exec backup backup
NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 1 ]; then
echo "[TEST:FAIL] Expected one notifications to be sent when configured"
exit 1
fail "Expected one notifications to be sent when configured"
fi
echo "[TEST:PASS] Correct number of notifications were sent when configured."
pass "Correct number of notifications were sent when configured."
MESSAGE_TITLE=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].title')
MESSAGE_BODY=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].message')
if [ "$MESSAGE_TITLE" != "Successful test run, yay!" ]; then
echo "[TEST:FAIL] Unexpected notification title $MESSAGE_TITLE"
exit 1
if [ "$MESSAGE_TITLE" != "Successful test run with extra-value, yay!" ]; then
fail "Unexpected notification title $MESSAGE_TITLE"
fi
echo "[TEST:PASS] Custom notification title was used."
pass "Custom notification title was used."
if [ "$MESSAGE_BODY" != "Backing up /tmp/test.tar.gz succeeded." ]; then
echo "[TEST:FAIL] Unexpected notification body $MESSAGE_BODY"
exit 1
fail "Unexpected notification body $MESSAGE_BODY"
fi
echo "[TEST:PASS] Custom notification body was used."
pass "Custom notification body was used."
docker-compose down --volumes
docker compose down --volumes


@@ -4,25 +4,27 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker-compose up -d
docker compose up -d
sleep 5
docker-compose exec backup backup
docker compose exec backup backup
sudo tar --same-owner -xvf ./local/backup.tar.gz -C /tmp
tmp_dir=$(mktemp -d)
sudo tar --same-owner -xvf ./local/backup.tar.gz -C $tmp_dir
sudo find /tmp/backup/postgres > /dev/null
echo "[TEST:PASS] Backup contains files at expected location"
sudo find $tmp_dir/backup/postgres > /dev/null
pass "Backup contains files at expected location"
for file in $(sudo find /tmp/backup/postgres); do
for file in $(sudo find $tmp_dir/backup/postgres); do
if [ "$(sudo stat -c '%u:%g' $file)" != "70:70" ]; then
echo "[TEST:FAIL] Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
exit 1
fail "Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
fi
done
echo "[TEST:PASS] All files and directories in backup preserved their ownership."
pass "All files and directories in backup preserved their ownership."
docker-compose down --volumes
docker compose down --volumes


@@ -12,21 +12,11 @@ services:
volumes:
- minio_backup_data:/data
webdav:
image: bytemark/webdav:2.4
environment:
AUTH_TYPE: Digest
USERNAME: test
PASSWORD: test
volumes:
- webdav_backup_data:/var/lib/dav
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
depends_on:
- minio
- webdav
restart: always
environment:
AWS_ACCESS_KEY_ID: test
@@ -36,19 +26,11 @@ services:
AWS_S3_BUCKET_NAME: backup
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
GPG_PASSPHRASE: 1234secret
WEBDAV_URL: http://webdav/
WEBDAV_URL_INSECURE: 'true'
WEBDAV_PATH: /my/new/path/
WEBDAV_USERNAME: test
WEBDAV_PASSWORD: test
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
@@ -61,5 +43,5 @@ services:
volumes:
minio_backup_data:
webdav_backup_data:
name: minio_backup_data
app_data:

42
test/s3/run.sh Executable file

@@ -0,0 +1,42 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'tar -xvf /minio_data/backup/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backups."
# The second part of this test checks that backups do not get deleted when the
# retention is set to 0 days (as that would mean deleting all existing backups)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes


@@ -0,0 +1,78 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense
version: '3.8'
services:
minio:
image: minio/minio:RELEASE.2020-08-04T23-10-51Z
deploy:
restart_policy:
condition: on-failure
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: test
MINIO_ACCESS_KEY: test
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
volumes:
- backup_data:/data
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
depends_on:
- minio
deploy:
restart_policy:
condition: on-failure
environment:
AWS_ACCESS_KEY_ID_FILE: /run/secrets/minio_root_user
AWS_SECRET_ACCESS_KEY_FILE: /run/secrets/minio_root_password
AWS_ENDPOINT: minio:9000
AWS_ENDPOINT_PROTO: http
AWS_S3_BUCKET_NAME: backup
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: 7
BACKUP_PRUNING_LEEWAY: 5s
volumes:
- pg_data:/backup/pg_data:ro
- /var/run/docker.sock:/var/run/docker.sock
secrets:
- minio_root_user
- minio_root_password
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
healthcheck:
disable: true
deploy:
replicas: 2
restart_policy:
condition: on-failure
pg:
image: postgres:14-alpine
environment:
POSTGRES_PASSWORD: example
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- pg_data:/var/lib/postgresql/data
deploy:
restart_policy:
condition: on-failure
volumes:
backup_data:
name: backup_data
pg_data:
name: pg_data
secrets:
minio_root_user:
external: true
minio_root_password:
external: true
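Note: the *_FILE variants point at Docker secrets; conceptually, the credential is read from the mounted file before use, equivalent to:

AWS_ACCESS_KEY_ID="$(cat /run/secrets/minio_root_user)"
AWS_SECRET_ACCESS_KEY="$(cat /run/secrets/minio_root_password)"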

44
test/secrets/run.sh Executable file

@@ -0,0 +1,44 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker swarm init
printf "test" | docker secret create minio_root_user -
printf "GMusLtUmILge2by+z890kQ" | docker secret create minio_root_password -
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z $(docker ps -q -f name=backup) ]; do
info "Backup container not ready yet. Retrying."
sleep 1
done
sleep 20
docker exec $(docker ps -q -f name=backup) backup
docker run --rm \
-v backup_data:/data alpine \
ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
pass "Found relevant files in untared backup."
sleep 5
expect_running_containers "5"
docker stack rm test_stack
docker secret rm minio_root_password
docker secret rm minio_root_user
docker swarm leave --force
sleep 10
docker volume rm backup_data
docker volume rm pg_data


@@ -0,0 +1,47 @@
version: '3'
services:
ssh:
image: linuxserver/openssh-server:version-8.6_p1-r3
environment:
- PUID=1000
- PGID=1000
- USER_NAME=test
volumes:
- ./id_rsa.pub:/config/.ssh/authorized_keys
- ssh_backup_data:/tmp
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
depends_on:
- ssh
restart: always
environment:
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
SSH_HOST_NAME: ssh
SSH_PORT: 2222
SSH_USER: test
SSH_REMOTE_PATH: /tmp
SSH_IDENTITY_PASSPHRASE: test1234
volumes:
- ./id_rsa:/root/.ssh/id_rsa
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
ssh_backup_data:
name: ssh_backup_data
app_data:

43
test/ssh/run.sh Executable file

@@ -0,0 +1,43 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm \
-v ssh_backup_data:/ssh_data \
alpine \
ash -c 'tar -xvf /ssh_data/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in decrypted and untared remote backups."
# The second part of this test checks that backups do not get deleted when the
# retention is set to 0 days (as that would mean deleting all existing backups)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm \
-v ssh_backup_data:/ssh_data \
alpine \
ash -c '[ $(find /ssh_data/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes
rm -f id_rsa id_rsa.pub
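Note: should the key setup need debugging, ssh-keygen -y prints the public key only when the private key and passphrase match, which makes for a quick local check before the containers start:

ssh-keygen -y -P test1234 -f id_rsa >/dev/null && echo 'passphrase ok'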


@@ -64,4 +64,6 @@ services:
volumes:
backup_data:
name: backup_data
pg_data:
name: pg_data


@@ -3,13 +3,15 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker swarm init
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z $(docker ps -q -f name=backup) ]; do
echo "[TEST:INFO] Backup container not ready yet. Retrying."
info "Backup container not ready yet. Retrying."
sleep 1
done
@@ -17,19 +19,19 @@ sleep 20
docker exec $(docker ps -q -f name=backup) backup
docker run --rm -it \
-v test_stack_backup_data:/data alpine \
docker run --rm \
-v backup_data:/data alpine \
ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
echo "[TEST:PASS] Found relevant files in untared backup."
pass "Found relevant files in untared backup."
sleep 5
if [ "$(docker ps -q | wc -l)" != "5" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker ps -a
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
expect_running_containers "5"
docker stack rm test_stack
docker swarm leave --force
sleep 10
docker volume rm backup_data
docker volume rm pg_data

2
test/user/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
local
backup


@@ -0,0 +1,30 @@
version: '2.4'
services:
alpine:
image: alpine:3.17.3
tty: true
volumes:
- app_data:/tmp
labels:
- docker-volume-backup.archive-pre.user=testuser
- docker-volume-backup.archive-pre=/bin/sh -c 'whoami > /tmp/whoami.txt'
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
deploy:
restart_policy:
condition: on-failure
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
EXEC_FORWARD_OUTPUT: "true"
volumes:
- ./local:/archive
- app_data:/backup/data:ro
- /var/run/docker.sock:/var/run/docker.sock
volumes:
app_data:
archive:

30
test/user/run.sh Normal file

@@ -0,0 +1,30 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
user_name=testuser
docker exec user-alpine-1 adduser --disabled-password "$user_name"
docker compose exec backup backup
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/whoami.txt ]; then
fail "Could not find file written by pre command."
fi
pass "Found expected file."
tar -xvf ./local/test.tar.gz
if [ "$(cat ./backup/data/whoami.txt)" != "$user_name" ]; then
fail "Could not find expected user name."
fi
pass "Found expected user."
docker compose down --volumes
sudo rm -rf ./local

23
test/util.sh Normal file

@@ -0,0 +1,23 @@
#!/bin/sh
set -e
info () {
echo "[test:${current_test:-none}:info] "$1""
}
pass () {
echo "[test:${current_test:-none}:pass] "$1""
}
fail () {
echo "[test:${current_test:-none}:fail] "$1""
exit 1
}
expect_running_containers () {
if [ "$(docker ps -q | wc -l)" != "$1" ]; then
fail "Expected $1 containers to be running, instead seen: "$(docker ps -a | wc -l)""
fi
pass "$1 containers running."
}
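Note: every test script sources these helpers and sets current_test before calling them; a minimal hypothetical consumer:

#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
info "starting"
expect_running_containers "0"
pass "No containers running."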


@@ -0,0 +1,45 @@
version: '3'
services:
webdav:
image: bytemark/webdav:2.4
environment:
AUTH_TYPE: Digest
USERNAME: test
PASSWORD: test
volumes:
- webdav_backup_data:/var/lib/dav
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
depends_on:
- webdav
restart: always
environment:
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
WEBDAV_URL: http://webdav/
WEBDAV_URL_INSECURE: 'true'
WEBDAV_PATH: /my/new/path/
WEBDAV_USERNAME: test
WEBDAV_PASSWORD: test
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
webdav_backup_data:
name: webdav_backup_data
app_data:

40
test/webdav/run.sh Executable file

@@ -0,0 +1,40 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm \
-v webdav_backup_data:/webdav_data \
alpine \
ash -c 'tar -xvf /webdav_data/data/my/new/path/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backup."
# The second part of this test checks that backups do not get deleted when the
# retention is set to 0 days (as that would mean deleting all existing backups)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm \
-v webdav_backup_data:/webdav_data \
alpine \
ash -c '[ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes