Mirror of https://github.com/offen/docker-volume-backup.git (synced 2025-12-05 17:18:02 +01:00)

Compare commits: v2.37.0-al...v2.46.0 (236 commits)
Commit SHAs in this range (author and date columns were not preserved):

```
60482b2045 52234592b2 82c441904f 6ad39c70ce bda5133574 97705ecb4d a79f2c15fd f76d4fd814
bebe3484c7 17a0410529 82ed46b0ea 473a1ed5ff ed70f4ac41 2d777ad76a 614d325c5a b07c34b85b
4262acb275 2deb1edcc9 fb36097d25 05fef47a2c 746b8f71f9 cbaa17d048 17a973d5ce ac663502c1
eb357eccc0 e13f7a76be f072f96cc3 11bb1cb636 19425fbfc7 e534923d9b a552403a44 67e66a6d8f
525e724399 a810457fb2 a2c86bc143 5e26d8b4d1 6acc4002fd a5579b5abb 4ad98af88d 1cfefd5822
c325986e53 8d51aa369f 44ca8a54d3 2ab6bd887f 87bf4fb3e0 0e68b98f49 16e5e0a2fc 5abaca1585
e26d901812 9265f7798c 7ab06cea59 2d8ec41439 6864403970 0ce19a4ff2 5291c5cc1c 47a9c13b54
06bb25c980 1dafa12561 2d15c1193c b1f49ea3e1 bc35433c65 f13189e6d2 9398db7276 38755bbb96
5dd323fcd8 781b205242 a57347a68f f9eabbc326 8618599a06 276d819554 8f38f5304d 6f2340fc9b
d538f00dcf 27371deaf8 a5019e62dd 1e4e9a3316 c735962bb9 0f31bd554b e572caeaab c836c19cbe
765630b131 6725f14ee0 89d020e0ed 9af201d4c3 f3774385b3 44c4c31a2b 152413b8bd 89655e09ad
016e470f5f 0f30b959f8 eb4099debd d8ac5ae7e6 bad2d98ac8 2884d89f47 fcdaa09538 c48ac28626
22a4346c06 41d518a341 8f0a1c9809 75f94b0211 56f325a8bd 7e6ed752f7 00cf059f4f cbbaa6ba7a
2652e05169 23756074f9 40b12b9d07 e628f09122 7340e00dab 958585336a 68b7e4d678 857e4fc605
8d26194809 3063288d1e 02fdfb363c 2ee23a9384 16be0c0217 4799795f0a 49b8d2f8d8 e4beef200a
e75ab8bdd8 a4145352f9 615256cda9 40c4f11d70 2685571c58 04ad0777e0 50e41eac02 94e59a102e
964a5e0342 2363c3c9cb 6bc66db833 de40eae4de 731421e359 d46918b13a 2fb63059b3 e0fcbd27e5
f4884bf190 52787a1e42 6e08ae7c39 0183db831b f481fda848 f4cf4173e6 681983608f 45335ffb67
01a595607d 119391e8df dd5f7f5b66 c54a5bef5f 8fac9608ff 3ee40b6422 8b5c9a494f 44ad3bbda2
74e065cbb9 8a64da4b0b f97ce11734 336e12f874 016c6c8307 e22f317fbb e04bd2f066 c4eeaad813
5840f1c5dc d71b7304c2 fbc7f85d9f 2af5bdf4d9 631ca3e07d 3d35d7c00e 954bde73fb ab46e96706
ab4ce94534 e4170addb6 b8410bbdc5 24e1341589 3d0286472b bb11ae035b 9209037ed9 2e73dea4f7
7dc3ae17e7 9d5ea718a0 272495ae7d 8beb28d4f8 0ec2e68076 b85afa6008 4cb47a4818 9b5ba8958d
0327701e2d 58f26ba004 f62ef6e05a 40924434e4 e613f6046f 292d47eb19 7637975e3f c47a14c53a
9f795761d6 f2ef48803c 8b69566291 bf79c913e0 2f7193aa9b 550c4f520f 1af472077c a077f12c11
cb5a38a1b7 b8995dbc51 baf34ec1f7 e8562b1785 5d7451410b 440bcf76ce 2d3e79cf5e 5abfe5bb39
6c8b0ccce5 f4c61125af 9b768c71e6 e8307a2b5b 060a6daa7a 4b3ca2ebb0 02ba9939a2 911fc5a223
f64aaa6e24 dd8ff5ee0c 52c22a1891 83fa0aae48 c4e480dcfd a01fc3df3f 37f9bd9a8f fb4663b087
0fe983dfcc 5c8bc107de 9a1e885138 241b5d2f25 aab47509d9 9b52c1f63e 164d6df3b4 4c74313222
de03d4f704 65626dd3d4 69eceb3982 1d45062100
```
.github/FUNDING.yml (vendored, 3 changes, file deleted)

```diff
@@ -1,3 +0,0 @@
-github: offen
-patreon: offen
```
.github/workflows/deploy-docs.yml (vendored, 7 changes)

```diff
@@ -3,6 +3,9 @@ name: Deploy Documenation site to GitHub Pages
 on:
   push:
     branches: ['main']
+    paths:
+      - 'docs/**'
+      - '.github/workflows/deploy-docs.yml'
   workflow_dispatch:
 
 permissions:
@@ -36,7 +39,7 @@ jobs:
         env:
           JEKYLL_ENV: production
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v1
+        uses: actions/upload-pages-artifact@v3
        with:
          path: 'docs/_site/'
 
@@ -49,4 +52,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
        id: deployment
-        uses: actions/deploy-pages@v1
+        uses: actions/deploy-pages@v4
```
.github/workflows/golangci-lint.yml (vendored, 41 changes)

```diff
@@ -7,7 +7,6 @@ on:
 
 permissions:
   contents: read
-  # Optional: allow read access to pull request. Use with `only-new-issues` option.
   pull-requests: read
 
 jobs:
@@ -15,40 +14,12 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
+      - uses: actions/checkout@v5
+      - uses: actions/setup-go@v6
        with:
-          go-version: '1.21'
-          cache: false
+          go-version: '1.25'
      - name: golangci-lint
-        uses: golangci/golangci-lint-action@v3
+        uses: golangci/golangci-lint-action@v8
        with:
-          # Require: The version of golangci-lint to use.
-          # When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version.
-          # When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit.
-          version: v1.54
-
-          # Optional: working directory, useful for monorepos
-          # working-directory: somedir
-
-          # Optional: golangci-lint command line arguments.
-          #
-          # Note: By default, the `.golangci.yml` file should be at the root of the repository.
-          # The location of the configuration file can be changed by using `--config=`
-          # args: --timeout=30m --config=/my/path/.golangci.yml --issues-exit-code=0
-
-          # Optional: show only new issues if it's a pull request. The default value is `false`.
-          # only-new-issues: true
-
-          # Optional: if set to true, then all caching functionality will be completely disabled,
-          #           takes precedence over all other caching options.
-          # skip-cache: true
-
-          # Optional: if set to true, then the action won't cache or restore ~/go/pkg.
-          # skip-pkg-cache: true
-
-          # Optional: if set to true, then the action won't cache or restore ~/.cache/go-build.
-          # skip-build-cache: true
-
-          # Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'.
-          # install-mode: "goinstall"
+          version: v2.4
+          args: --timeout 5m
```
.github/workflows/release.yml (vendored, 39 changes)

```diff
@@ -15,6 +15,38 @@ jobs:
       - name: Check out the repo
         uses: actions/checkout@v4
 
+      - name: set Environment Variables
+        id: env
+        run: |
+          echo "NOW=$(date +'%F %Z %T')" >> $GITHUB_ENV
+
+      - name: Docker meta
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          # list of Docker images to use as base name for tags
+          images: |
+            offen/docker-volume-backup
+            ghcr.io/offen/docker-volume-backup
+          # define global behaviour for tags
+          flavor: |
+            latest=false
+          # specify one tag which never gets set, to prevent the tag-attribute being empty, as it will fallback to a default
+          tags: |
+            # output v2.42.1-alpha.1 (incl. pre-releases)
+            type=semver,pattern=v{{version}},enable=false
+          labels: |
+            org.opencontainers.image.title=${{github.event.repository.name}}
+            org.opencontainers.image.description=Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage, Dropbox or SSH compatible storage
+            org.opencontainers.image.vendor=${{github.repository_owner}}
+            org.opencontainers.image.licenses=MPL-2.0
+            org.opencontainers.image.version=${{github.ref_name}}
+            org.opencontainers.image.created=${{ env.NOW }}
+            org.opencontainers.image.source=${{github.server_url}}/${{github.repository}}
+            org.opencontainers.image.revision=${{github.sha}}
+            org.opencontainers.image.url=https://offen.github.io/docker-volume-backup/
+            org.opencontainers.image.documentation=https://offen.github.io/docker-volume-backup/
+
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
 
@@ -35,7 +67,7 @@ jobs:
           password: ${{ secrets.GITHUB_TOKEN }}
 
       - name: Extract Docker tags
-        id: meta
+        id: tags
         run: |
           version_tag="${{github.ref_name}}"
           tags=($version_tag)
@@ -51,9 +83,10 @@ jobs:
           echo "releases=$releases" >> "$GITHUB_OUTPUT"
 
       - name: Build and push Docker images
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v5
         with:
           context: .
           push: true
          platforms: linux/amd64,linux/arm64,linux/arm/v7
-          tags: ${{ steps.meta.outputs.releases }}
+          tags: ${{ steps.tags.outputs.releases }}
+          labels: ${{ steps.meta.outputs.labels }}
```
.github/workflows/unit.yml (vendored, new file, 21 lines)

```diff
@@ -0,0 +1,21 @@
+name: Run Unit Tests
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+
+jobs:
+  build:
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: '1.25.x'
+      - name: Install dependencies
+        run: go mod download
+      - name: Test with the Go CLI
+        run: go test -v ./...
```
```diff
@@ -1,8 +1,7 @@
+version: '2'
 linters:
-  # Enable specific linter
-  # https://golangci-lint.run/usage/linters/#enabled-by-default
   enable:
     - staticcheck
     - govet
 output:
   format: github-actions
```
Dockerfile (12 changes)

```diff
@@ -1,7 +1,7 @@
-# Copyright 2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2022 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0
 
-FROM golang:1.21-alpine as builder
+FROM golang:1.25-alpine AS builder
 
 WORKDIR /app
 COPY . .
@@ -9,13 +9,13 @@ RUN go mod download
 WORKDIR /app/cmd/backup
 RUN go build -o backup .
 
-FROM alpine:3.19
+FROM alpine:3.22
 
 WORKDIR /root
 
-RUN apk add --no-cache ca-certificates
+RUN apk add --no-cache ca-certificates && \
+  chmod a+rw /var/lock
 
 COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
-COPY --chmod=755 ./entrypoint.sh /root/
 
-ENTRYPOINT ["/root/entrypoint.sh"]
+ENTRYPOINT ["/usr/bin/backup", "-foreground"]
```
README.md (23 changes)

````diff
@@ -1,13 +1,13 @@
-<a href="https://www.offen.dev/">
-  <img src="https://offen.github.io/press-kit/offen-material/gfx-GitHub-Offen-logo.svg" alt="Offen logo" title="Offen" width="150px"/>
+<a href="https://www.offen.software/">
+  <img src="https://offen.github.io/press-kit/avatars/avatar-OS-header.svg" alt="offen.software logo" title="offen.software" width="60px"/>
 </a>
 
 # docker-volume-backup
 
-Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage, Dropbox or SSH compatible storage.
+Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage, Dropbox, Google Drive or SSH compatible storage.
 
-The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) companion container to an existing Docker setup.
-It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV, Azure Blob Storage, Dropbox or SSH compatible storage (or any combination thereof) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for (failed) backup runs__.
+The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 25MB) companion container to an existing Docker setup.
+It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV, Azure Blob Storage, Dropbox, Google Drive or SSH compatible storage (or any combination thereof) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for (failed) backup runs__.
 
 Documentation is found at <https://offen.github.io/docker-volume-backup>
 - [Quickstart](https://offen.github.io/docker-volume-backup)
@@ -24,8 +24,6 @@ Documentation is found at <https://offen.github.io/docker-volume-backup>
 Add a `backup` service to your compose setup and mount the volumes you would like to see backed up:
 
 ```yml
-version: '3'
-
 services:
   volume-consumer:
     build:
@@ -76,4 +74,13 @@ docker run --rm \
   offen/docker-volume-backup:v2
 ```
 
-Alternatively, pass a `--env-file` in order to use a full config as described below.
+Alternatively, pass a `--env-file` in order to use a full config as described [in the docs](https://offen.github.io/docker-volume-backup/reference/).
+
+### Looking for help?
+
+In case your are looking for help or guidance on how to incorporate docker-volume-backup into your existing setup, consider [becoming a sponsor](https://github.com/sponsors/offen?frequency=one-time) and book a one hour consulting session.
+
+---
+
+Copyright © 2024 <a target="_blank" href="https://www.offen.software">offen.software</a> and contributors.
+Distributed under the <a href="https://github.com/offen/docker-volume-backup/tree/main/LICENSE">MPL-2.0 License</a>.
````
```diff
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0
 
 // Portions of this file are taken from package `targz`, Copyright (c) 2014 Fredrik Wallgren
@@ -16,23 +16,22 @@ import (
 	"runtime"
 	"strings"
 
-	"github.com/klauspost/pgzip"
-
 	"github.com/klauspost/compress/zstd"
+	"github.com/klauspost/pgzip"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
 
 func createArchive(files []string, inputFilePath, outputFilePath string, compression string, compressionConcurrency int) error {
-	inputFilePath = stripTrailingSlashes(inputFilePath)
-	inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
+	_, outputFilePath, err := makeAbsolute(stripTrailingSlashes(inputFilePath), outputFilePath)
 	if err != nil {
-		return fmt.Errorf("createArchive: error transposing given file paths: %w", err)
+		return errwrap.Wrap(err, "error transposing given file paths")
 	}
 	if err := os.MkdirAll(filepath.Dir(outputFilePath), 0755); err != nil {
-		return fmt.Errorf("createArchive: error creating output file path: %w", err)
+		return errwrap.Wrap(err, "error creating output file path")
 	}
 
-	if err := compress(files, outputFilePath, filepath.Dir(inputFilePath), compression, compressionConcurrency); err != nil {
-		return fmt.Errorf("createArchive: error creating archive: %w", err)
+	if err := compress(files, outputFilePath, compression, compressionConcurrency); err != nil {
+		return errwrap.Wrap(err, "error creating archive")
 	}
 
 	return nil
@@ -55,38 +54,38 @@ func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error)
 	return inputFilePath, outputFilePath, err
 }
 
-func compress(paths []string, outFilePath, subPath string, algo string, concurrency int) error {
+func compress(paths []string, outFilePath, algo string, concurrency int) error {
 	file, err := os.Create(outFilePath)
 	if err != nil {
-		return fmt.Errorf("compress: error creating out file: %w", err)
+		return errwrap.Wrap(err, "error creating out file")
 	}
 
 	prefix := path.Dir(outFilePath)
 	compressWriter, err := getCompressionWriter(file, algo, concurrency)
 	if err != nil {
-		return fmt.Errorf("compress: error getting compression writer: %w", err)
+		return errwrap.Wrap(err, "error getting compression writer")
 	}
 	tarWriter := tar.NewWriter(compressWriter)
 
 	for _, p := range paths {
 		if err := writeTarball(p, tarWriter, prefix); err != nil {
-			return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
+			return errwrap.Wrap(err, fmt.Sprintf("error writing %s to archive", p))
 		}
 	}
 
 	err = tarWriter.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing tar writer: %w", err)
+		return errwrap.Wrap(err, "error closing tar writer")
 	}
 
 	err = compressWriter.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing compression writer: %w", err)
+		return errwrap.Wrap(err, "error closing compression writer")
 	}
 
 	err = file.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing file: %w", err)
+		return errwrap.Wrap(err, "error closing file")
 	}
 
 	return nil
@@ -94,10 +93,12 @@ func compress(paths []string, outFilePath, subPath string, algo string, concurre
 
 func getCompressionWriter(file *os.File, algo string, concurrency int) (io.WriteCloser, error) {
 	switch algo {
+	case "none":
+		return &passThroughWriteCloser{file}, nil
 	case "gz":
 		w, err := pgzip.NewWriterLevel(file, 5)
 		if err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: gzip error: %w", err)
+			return nil, errwrap.Wrap(err, "gzip error")
 		}
 
 		if concurrency == 0 {
@@ -105,25 +106,26 @@ func getCompressionWriter(file *os.File, algo string, concurrency int) (io.Write
 		}
 
 		if err := w.SetConcurrency(1<<20, concurrency); err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: error setting concurrency: %w", err)
+			return nil, errwrap.Wrap(err, "error setting concurrency")
 		}
 
 		return w, nil
 	case "zst":
 		compressWriter, err := zstd.NewWriter(file)
 		if err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: zstd error: %w", err)
+			return nil, errwrap.Wrap(err, "zstd error")
 		}
 		return compressWriter, nil
 	default:
-		return nil, fmt.Errorf("getCompressionWriter: unsupported compression algorithm: %s", algo)
+		return nil, errwrap.Wrap(nil, fmt.Sprintf("unsupported compression algorithm: %s", algo))
 	}
 }
 
-func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
+func writeTarball(path string, tarWriter *tar.Writer, prefix string) (returnErr error) {
 	fileInfo, err := os.Lstat(path)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error getting file infor for %s: %w", path, err)
+		returnErr = errwrap.Wrap(err, fmt.Sprintf("error getting file info for %s", path))
+		return
 	}
 
 	if fileInfo.Mode()&os.ModeSocket == os.ModeSocket {
@@ -134,19 +136,22 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
 	if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
 		var err error
 		if link, err = os.Readlink(path); err != nil {
-			return fmt.Errorf("writeTarball: error resolving symlink %s: %w", path, err)
+			returnErr = errwrap.Wrap(err, fmt.Sprintf("error resolving symlink %s", path))
+			return
 		}
 	}
 
 	header, err := tar.FileInfoHeader(fileInfo, link)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error getting file info header: %w", err)
+		returnErr = errwrap.Wrap(err, "error getting file info header")
+		return
 	}
 	header.Name = strings.TrimPrefix(path, prefix)
 
 	err = tarWriter.WriteHeader(header)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error writing file info header: %w", err)
+		returnErr = errwrap.Wrap(err, "error writing file info header")
+		return
 	}
 
 	if !fileInfo.Mode().IsRegular() {
@@ -155,14 +160,30 @@
 
 	file, err := os.Open(path)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error opening %s: %w", path, err)
+		returnErr = errwrap.Wrap(err, fmt.Sprintf("error opening %s", path))
+		return
 	}
-	defer file.Close()
+	defer func() {
+		returnErr = file.Close()
+	}()
 
 	_, err = io.Copy(tarWriter, file)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error copying %s to tar writer: %w", path, err)
+		returnErr = errwrap.Wrap(err, fmt.Sprintf("error copying %s to tar writer", path))
+		return
 	}
 
 	return nil
 }
+
+type passThroughWriteCloser struct {
+	target io.WriteCloser
+}
+
+func (p *passThroughWriteCloser) Write(b []byte) (int, error) {
+	return p.target.Write(b)
+}
+
+func (p *passThroughWriteCloser) Close() error {
+	return nil
+}
```
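The refactor above swaps `fmt.Errorf("createArchive: ...: %w", err)` calls for an internal `errwrap` package whose implementation is not part of this diff. A minimal sketch of what such a package might look like, assuming `Wrap` prepends caller context (which would make the hand-written `createArchive:`/`compress:` prefixes redundant) and `Unwrap` walks to the root cause, as the logging code further below uses it:

```go
// Hypothetical sketch of internal/errwrap; the real implementation is not
// shown in this diff. Assumes Wrap annotates an error with the calling
// function's name and Unwrap returns the innermost error in a wrap chain.
package errwrap

import (
	"errors"
	"fmt"
	"runtime"
)

// Wrap prefixes message (and the calling function's name) to err.
// A nil err still produces a new error carrying just the context.
func Wrap(err error, message string) error {
	caller := "unknown"
	if pc, _, _, ok := runtime.Caller(1); ok {
		if fn := runtime.FuncForPC(pc); fn != nil {
			caller = fn.Name()
		}
	}
	if err == nil {
		return fmt.Errorf("%s: %s", caller, message)
	}
	return fmt.Errorf("%s: %s: %w", caller, message, err)
}

// Unwrap walks the chain of wrapped errors and returns the root cause.
func Unwrap(err error) error {
	for {
		next := errors.Unwrap(err)
		if next == nil {
			return err
		}
		err = next
	}
}
```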
cmd/backup/command.go (new file, 152 lines)

```go
// Copyright 2024 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"fmt"
	"log/slog"
	"os"
	"os/signal"
	"syscall"

	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/robfig/cron/v3"
)

type command struct {
	logger    *slog.Logger
	schedules []cron.EntryID
	cr        *cron.Cron
	reload    chan struct{}
}

func newCommand() *command {
	return &command{
		logger: slog.New(slog.NewTextHandler(os.Stdout, nil)),
	}
}

// runAsCommand executes a backup run for each configuration that is available
// and then returns
func (c *command) runAsCommand() error {
	configurations, err := sourceConfiguration(configStrategyEnv)
	if err != nil {
		return errwrap.Wrap(err, "error loading env vars")
	}

	for _, config := range configurations {
		if err := runScript(config); err != nil {
			return errwrap.Wrap(err, "error running script")
		}
	}

	return nil
}

type foregroundOpts struct {
	profileCronExpression string
}

// runInForeground starts the program as a long running process, scheduling
// a job for each configuration that is available.
func (c *command) runInForeground(opts foregroundOpts) error {
	c.cr = cron.New(
		cron.WithParser(
			cron.NewParser(
				cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
			),
		),
	)

	if err := c.schedule(configStrategyConfd); err != nil {
		return errwrap.Wrap(err, "error scheduling")
	}

	if opts.profileCronExpression != "" {
		if _, err := c.cr.AddFunc(opts.profileCronExpression, c.profile); err != nil {
			return errwrap.Wrap(err, "error adding profiling job")
		}
	}

	var quit = make(chan os.Signal, 1)
	c.reload = make(chan struct{}, 1)
	signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT)
	c.cr.Start()

	for {
		select {
		case <-quit:
			ctx := c.cr.Stop()
			<-ctx.Done()
			return nil
		case <-c.reload:
			if err := c.schedule(configStrategyConfd); err != nil {
				return errwrap.Wrap(err, "error reloading configuration")
			}
		}
	}
}

// schedule wipes all existing schedules and enqueues all schedules available
// using the given configuration strategy
func (c *command) schedule(strategy configStrategy) error {
	for _, id := range c.schedules {
		c.cr.Remove(id)
	}

	configurations, err := sourceConfiguration(strategy)
	if err != nil {
		return errwrap.Wrap(err, "error sourcing configuration")
	}

	for _, cfg := range configurations {
		config := cfg
		id, err := c.cr.AddFunc(config.BackupCronExpression, func() {
			c.logger.Info(
				fmt.Sprintf(
					"Now running script on schedule %s",
					config.BackupCronExpression,
				),
			)

			if err := runScript(config); err != nil {
				c.logger.Error(
					fmt.Sprintf(
						"Unexpected error running schedule %s: %v",
						config.BackupCronExpression,
						errwrap.Unwrap(err),
					),
					"error",
					err,
				)
			}
		})

		if err != nil {
			return errwrap.Wrap(err, fmt.Sprintf("error adding schedule %s", config.BackupCronExpression))
		}
		c.logger.Info(fmt.Sprintf("Successfully scheduled backup %s with expression %s", config.source, config.BackupCronExpression))
		if ok := checkCronSchedule(config.BackupCronExpression); !ok {
			c.logger.Warn(
				fmt.Sprintf("Scheduled cron expression %s will never run, is this intentional?", config.BackupCronExpression),
			)
		}
		c.schedules = append(c.schedules, id)
	}

	return nil
}

// must exits the program when passed an error. It should be the only
// place where the application exits forcefully.
func (c *command) must(err error) {
	if err != nil {
		c.logger.Error(
			fmt.Sprintf("Fatal error running command: %v", errwrap.Unwrap(err)),
			"error",
			err,
		)
		os.Exit(1)
	}
}
```
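The Dockerfile change above replaces the shell entrypoint with `ENTRYPOINT ["/usr/bin/backup", "-foreground"]`, which maps onto `runInForeground`. The repository's actual `main.go` is not part of this diff; a plausible sketch of how the two modes could be wired together (flag names other than `-foreground` are assumptions):

```go
// Hypothetical wiring for the two run modes; the repository's real main.go
// is not shown in this diff. Only the -foreground flag name is taken from
// the Dockerfile's ENTRYPOINT; the -profile flag is an assumption.
package main

import "flag"

func main() {
	foreground := flag.Bool("foreground", false, "run as a long-lived process, scheduling backups via cron expressions")
	profileCronExpression := flag.String("profile", "", "optional cron expression for periodic profiling")
	flag.Parse()

	c := newCommand()
	if *foreground {
		// Long-running mode: schedule one cron job per configuration.
		c.must(c.runInForeground(foregroundOpts{
			profileCronExpression: *profileCronExpression,
		}))
		return
	}
	// One-off mode: run each configuration once, then exit.
	c.must(c.runAsCommand())
}
```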
```diff
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0
 
 package main
@@ -11,6 +11,8 @@ import (
 	"regexp"
 	"strconv"
 	"time"
+
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
 
 // Config holds all configuration values that are expected to be set
@@ -34,16 +36,22 @@ type Config struct {
 	BackupFilenameExpand bool `split_words:"true"`
 	BackupLatestSymlink string `split_words:"true"`
 	BackupArchive string `split_words:"true" default:"/archive"`
 	BackupCronExpression string `split_words:"true" default:"@daily"`
 	BackupJitter time.Duration `split_words:"true" default:"0s"`
 	BackupRetentionDays int32 `split_words:"true" default:"-1"`
 	BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
 	BackupPruningPrefix string `split_words:"true"`
 	BackupStopContainerLabel string `split_words:"true"`
 	BackupStopDuringBackupLabel string `split_words:"true" default:"true"`
 	BackupStopDuringBackupNoRestartLabel string `split_words:"true" default:"true"`
 	BackupStopServiceTimeout time.Duration `split_words:"true" default:"5m"`
 	BackupFromSnapshot bool `split_words:"true"`
 	BackupExcludeRegexp RegexpDecoder `split_words:"true"`
 	BackupSkipBackendsFromPrune []string `split_words:"true"`
 	GpgPassphrase string `split_words:"true"`
 	GpgPublicKeyRing string `split_words:"true"`
+	AgePassphrase string `split_words:"true"`
+	AgePublicKeys []string `split_words:"true"`
 	NotificationURLs []string `envconfig:"NOTIFICATION_URLS"`
 	NotificationLevel string `split_words:"true" default:"error"`
 	EmailNotificationRecipient string `split_words:"true"`
@@ -69,9 +77,11 @@ type Config struct {
 	LockTimeout time.Duration `split_words:"true" default:"60m"`
 	AzureStorageAccountName string `split_words:"true"`
 	AzureStoragePrimaryAccountKey string `split_words:"true"`
+	AzureStorageConnectionString string `split_words:"true"`
 	AzureStorageContainerName string `split_words:"true"`
 	AzureStoragePath string `split_words:"true"`
 	AzureStorageEndpoint string `split_words:"true" default:"https://{{ .AccountName }}.blob.core.windows.net/"`
+	AzureStorageAccessTier string `split_words:"true"`
 	DropboxEndpoint string `split_words:"true" default:"https://api.dropbox.com/"`
 	DropboxOAuth2Endpoint string `envconfig:"DROPBOX_OAUTH2_ENDPOINT" default:"https://api.dropbox.com/"`
 	DropboxRefreshToken string `split_words:"true"`
@@ -79,17 +89,24 @@ type Config struct {
 	DropboxAppSecret string `split_words:"true"`
 	DropboxRemotePath string `split_words:"true"`
 	DropboxConcurrencyLevel NaturalNumber `split_words:"true" default:"6"`
+	GoogleDriveCredentialsJSON string `split_words:"true"`
+	GoogleDriveFolderID string `split_words:"true"`
+	GoogleDriveImpersonateSubject string `split_words:"true"`
+	GoogleDriveEndpoint string `split_words:"true"`
+	GoogleDriveTokenURL string `split_words:"true"`
+	source string
+	additionalEnvVars map[string]string
 }
 
 type CompressionType string
 
 func (c *CompressionType) Decode(v string) error {
 	switch v {
-	case "gz", "zst":
+	case "none", "gz", "zst":
 		*c = CompressionType(v)
 		return nil
 	default:
-		return fmt.Errorf("config: error decoding compression type %s", v)
+		return errwrap.Wrap(nil, fmt.Sprintf("error decoding compression type %s", v))
 	}
 }
@@ -112,7 +129,7 @@ func (c *CertDecoder) Decode(v string) error {
 	block, _ := pem.Decode(content)
 	cert, err := x509.ParseCertificate(block.Bytes)
 	if err != nil {
-		return fmt.Errorf("config: error parsing certificate: %w", err)
+		return errwrap.Wrap(err, "error parsing certificate")
 	}
 	*c = CertDecoder{Cert: cert}
 	return nil
@@ -128,7 +145,7 @@ func (r *RegexpDecoder) Decode(v string) error {
 	}
 	re, err := regexp.Compile(v)
 	if err != nil {
-		return fmt.Errorf("config: error compiling given regexp `%s`: %w", v, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error compiling given regexp `%s`", v))
 	}
 	*r = RegexpDecoder{Re: re}
 	return nil
@@ -140,10 +157,10 @@ type NaturalNumber int
 
 func (n *NaturalNumber) Decode(v string) error {
 	asInt, err := strconv.Atoi(v)
 	if err != nil {
-		return fmt.Errorf("config: error converting %s to int", v)
+		return errwrap.Wrap(nil, fmt.Sprintf("error converting %s to int", v))
 	}
 	if asInt <= 0 {
-		return fmt.Errorf("config: expected a natural number, got %d", asInt)
+		return errwrap.Wrap(nil, fmt.Sprintf("expected a natural number, got %d", asInt))
 	}
 	*n = NaturalNumber(asInt)
 	return nil
@@ -159,10 +176,10 @@ type WholeNumber int
 
 func (n *WholeNumber) Decode(v string) error {
 	asInt, err := strconv.Atoi(v)
 	if err != nil {
-		return fmt.Errorf("config: error converting %s to int", v)
+		return errwrap.Wrap(nil, fmt.Sprintf("error converting %s to int", v))
 	}
 	if asInt < 0 {
-		return fmt.Errorf("config: expected a whole, positive number, including zero. Got %d", asInt)
+		return errwrap.Wrap(nil, fmt.Sprintf("expected a whole, positive number, including zero. Got %d", asInt))
 	}
 	*n = WholeNumber(asInt)
 	return nil
@@ -171,3 +188,40 @@ func (n *WholeNumber) Decode(v string) error {
 func (n *WholeNumber) Int() int {
 	return int(*n)
 }
+
+type envVarLookup struct {
+	ok    bool
+	key   string
+	value string
+}
+
+// applyEnv sets the values in `additionalEnvVars` as environment variables.
+// It returns a function that reverts all values that have been set to its
+// previous state.
+func (c *Config) applyEnv() (func() error, error) {
+	lookups := []envVarLookup{}
+
+	unset := func() error {
+		for _, lookup := range lookups {
+			if !lookup.ok {
+				if err := os.Unsetenv(lookup.key); err != nil {
+					return errwrap.Wrap(err, fmt.Sprintf("error unsetting env var %s", lookup.key))
+				}
+				continue
+			}
+			if err := os.Setenv(lookup.key, lookup.value); err != nil {
+				return errwrap.Wrap(err, fmt.Sprintf("error setting back env var %s", lookup.key))
+			}
+		}
+		return nil
+	}
+
+	for key, value := range c.additionalEnvVars {
+		current, ok := os.LookupEnv(key)
+		lookups = append(lookups, envVarLookup{ok: ok, key: key, value: current})
+		if err := os.Setenv(key, value); err != nil {
+			return unset, errwrap.Wrap(err, "error setting env var")
+		}
+	}
+	return unset, nil
+}
```
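`applyEnv` follows a set-and-restore pattern: it exports the values sourced from one `conf.d` file for the duration of a run and hands back a closure that reverts the process environment. A short sketch of how a caller might use it (the `runBackup` callback is a stand-in for the real per-configuration work, which is not shown here):

```go
// Hypothetical caller of applyEnv, illustrating the set-and-restore pattern.
// runBackup stands in for the actual per-configuration backup logic.
func withConfigEnv(c *Config, runBackup func() error) error {
	restore, err := c.applyEnv()
	if err != nil {
		return errwrap.Wrap(err, "error applying env")
	}
	// Restore the previous environment no matter how the run ends, so
	// values from one conf.d file never leak into the next run.
	defer func() {
		_ = restore() // the real code would presumably log this error
	}()
	return runBackup()
}
```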
cmd/backup/config_provider.go (new file, 166 lines)

```go
// Copyright 2024 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/joho/godotenv"
	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/envconfig"
	shell "mvdan.cc/sh/v3/shell"
)

type configStrategy string

const (
	configStrategyEnv   configStrategy = "env"
	configStrategyConfd configStrategy = "confd"
)

// sourceConfiguration returns a list of config objects using the given
// strategy. It should be the single entrypoint for retrieving configuration
// for all consumers.
func sourceConfiguration(strategy configStrategy) ([]*Config, error) {
	switch strategy {
	case configStrategyEnv:
		c, err := loadConfigFromEnvVars()
		return []*Config{c}, err
	case configStrategyConfd:
		cs, err := loadConfigsFromEnvFiles("/etc/dockervolumebackup/conf.d")
		if err != nil {
			if os.IsNotExist(err) {
				return sourceConfiguration(configStrategyEnv)
			}
			return nil, errwrap.Wrap(err, "error loading config files")
		}
		return cs, nil
	default:
		return nil, errwrap.Wrap(nil, fmt.Sprintf("received unknown config strategy: %v", strategy))
	}
}

// envProxy is a function that mimics os.LookupEnv but can read values from any other source
type envProxy func(string) (string, bool)

// loadConfig creates a config object using the given lookup function
func loadConfig(lookup envProxy) (*Config, error) {
	envconfig.Lookup = func(key string) (string, bool) {
		value, okValue := lookup(key)
		location, okFile := lookup(key + "_FILE")

		switch {
		case okValue && !okFile: // only value
			return value, true
		case !okValue && okFile: // only file
			contents, err := os.ReadFile(location)
			if err != nil {
				return "", false
			}
			return string(contents), true
		case okValue && okFile: // both
			return "", false
		default: // neither, ignore
			return "", false
		}
	}

	var c = &Config{}
	if err := envconfig.Process("", c); err != nil {
		return nil, errwrap.Wrap(err, "failed to process configuration values")
	}

	return c, nil
}

func loadConfigFromEnvVars() (*Config, error) {
	c, err := loadConfig(os.LookupEnv)
	if err != nil {
		return nil, errwrap.Wrap(err, "error loading config from environment")
	}
	c.source = "from environment"
	return c, nil
}

func loadConfigsFromEnvFiles(directory string) ([]*Config, error) {
	items, err := os.ReadDir(directory)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, err
		}
		return nil, errwrap.Wrap(err, "failed to read files from env directory")
	}

	configs := []*Config{}
	for _, item := range items {
		if item.IsDir() {
			continue
		}
		p := filepath.Join(directory, item.Name())
		envFile, err := source(p)
		if err != nil {
			return nil, errwrap.Wrap(err, fmt.Sprintf("error reading config file %s", p))
		}
		lookup := func(key string) (string, bool) {
			val, ok := envFile[key]
			if ok {
				return val, ok
			}
			return os.LookupEnv(key)
		}
		c, err := loadConfig(lookup)
		if err != nil {
			return nil, errwrap.Wrap(err, fmt.Sprintf("error loading config from file %s", p))
		}
		c.source = item.Name()
		c.additionalEnvVars = envFile
		configs = append(configs, c)
	}

	return configs, nil
}

// source tries to mimic the pre v2.37.0 behavior of calling
// `set +a; source $path; set -a` and returns the env vars as a map
func source(path string) (map[string]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, errwrap.Wrap(err, fmt.Sprintf("error opening %s", path))
	}

	result := map[string]string{}
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, "#") {
			continue
		}
		withExpansion, err := shell.Expand(line, nil)
		if err != nil {
			return nil, errwrap.Wrap(err, "error expanding env")
		}
		m, err := godotenv.Unmarshal(withExpansion)
		if err != nil {
			return nil, errwrap.Wrap(err, fmt.Sprintf("error sourcing %s", path))
		}
		for key, value := range m {
			currentValue, currentOk := os.LookupEnv(key)
			defer func() {
				if currentOk {
					_ = os.Setenv(key, currentValue)
					return
				}
				_ = os.Unsetenv(key)
			}()
			result[key] = value
			_ = os.Setenv(key, value)
		}
	}
	return result, nil
}
```
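The `envconfig.Lookup` override in `loadConfig` means any option can alternatively be passed as a `KEY_FILE` reference, e.g. to a mounted Docker secret, with a plain `KEY` winning when only it is set and the combination of both being rejected. The same precedence rules in a standalone sketch (the map stands in for the process environment; the key name is just an example):

```go
// Standalone sketch of the KEY / KEY_FILE lookup precedence implemented in
// loadConfig above; env is a stand-in for the process environment.
package main

import (
	"fmt"
	"os"
)

func lookupWithFile(env map[string]string, key string) (string, bool) {
	value, okValue := env[key]
	location, okFile := env[key+"_FILE"]
	switch {
	case okValue && !okFile: // plain value wins
		return value, true
	case !okValue && okFile: // read the value from the referenced file
		contents, err := os.ReadFile(location)
		if err != nil {
			return "", false
		}
		return string(contents), true
	default: // both set (ambiguous) or neither set
		return "", false
	}
}

func main() {
	// Example: a *_FILE variable pointing at a mounted secret.
	env := map[string]string{"AWS_SECRET_ACCESS_KEY_FILE": "/run/secrets/aws_secret"}
	v, ok := lookupWithFile(env, "AWS_SECRET_ACCESS_KEY")
	fmt.Println(v, ok)
}
```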
cmd/backup/config_provider_test.go (new file, 79 lines)

```go
package main

import (
	"os"
	"reflect"
	"testing"
)

func TestSource(t *testing.T) {
	tests := []struct {
		name           string
		input          string
		expectError    bool
		expectedOutput map[string]string
	}{
		{
			"default",
			"testdata/default.env",
			false,
			map[string]string{
				"FOO": "bar",
				"BAZ": "qux",
			},
		},
		{
			"not found",
			"testdata/nope.env",
			true,
			nil,
		},
		{
			"braces",
			"testdata/braces.env",
			false,
			map[string]string{
				"FOO": "qux",
				"BAR": "xxx",
				"BAZ": "",
			},
		},
		{
			"expansion",
			"testdata/expansion.env",
			false,
			map[string]string{
				"BAR": "xxx",
				"FOO": "xxx",
				"BAZ": "xxx",
				"QUX": "yyy",
			},
		},
		{
			"comments",
			"testdata/comments.env",
			false,
			map[string]string{
				"BAR": "xxx",
				"BAZ": "yyy",
			},
		},
	}

	_ = os.Setenv("QUX", "yyy")
	defer func() {
		_ = os.Unsetenv("QUX")
	}()

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			result, err := source(test.input)
			if (err != nil) != test.expectError {
				t.Errorf("Unexpected error value %v", err)
			}
			if !reflect.DeepEqual(test.expectedOutput, result) {
				t.Errorf("Expected %v, got %v", test.expectedOutput, result)
			}
		})
	}
}
```
cmd/backup/copy_archive.go (new file, 41 lines)

```go
// Copyright 2024 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"os"
	"path"

	"github.com/offen/docker-volume-backup/internal/errwrap"
	"golang.org/x/sync/errgroup"
)

// copyArchive makes sure the backup file is copied to both local and remote locations
// as per the given configuration.
func (s *script) copyArchive() error {
	_, name := path.Split(s.file)
	if stat, err := os.Stat(s.file); err != nil {
		return errwrap.Wrap(err, "unable to stat backup file")
	} else {
		size := stat.Size()
		s.stats.BackupFile = BackupFileStats{
			Size:     uint64(size),
			Name:     name,
			FullPath: s.file,
		}
	}

	eg := errgroup.Group{}
	for _, backend := range s.storages {
		b := backend
		eg.Go(func() error {
			return b.Copy(s.file)
		})
	}
	if err := eg.Wait(); err != nil {
		return errwrap.Wrap(err, "error copying archive")
	}

	return nil
}
```
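`copyArchive` fans the upload out to all configured storage backends concurrently and surfaces the first failure. The same errgroup pattern in isolation (`Copier` is a hypothetical stand-in for the storage backend interface, which is not shown in this diff):

```go
// Minimal, self-contained illustration of the errgroup fan-out used in
// copyArchive; Copier is a hypothetical stand-in for the storage backends.
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

type Copier interface {
	Copy(file string) error
}

func copyToAll(backends []Copier, file string) error {
	eg := errgroup.Group{}
	for _, backend := range backends {
		b := backend // capture loop variable (pre Go 1.22 semantics)
		eg.Go(func() error {
			return b.Copy(file)
		})
	}
	// Wait blocks until all uploads finish and returns the first error.
	return eg.Wait()
}

type printCopier struct{ name string }

func (p printCopier) Copy(file string) error {
	fmt.Printf("%s <- %s\n", p.name, file)
	return nil
}

func main() {
	backends := []Copier{printCopier{"local"}, printCopier{"s3"}}
	if err := copyToAll(backends, "/tmp/backup.tar.gz"); err != nil {
		fmt.Println("copy failed:", err)
	}
}
```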
cmd/backup/create_archive.go (new file, 88 lines)

```go
// Copyright 2024 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"

	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/otiai10/copy"
)

// createArchive creates a tar archive of the configured backup location and
// saves it to disk.
func (s *script) createArchive() error {
	backupSources := s.c.BackupSources

	if s.c.BackupFromSnapshot {
		s.logger.Warn(
			"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
		)
		s.logger.Warn(
			"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the documentation for an upgrade guide.",
		)
		backupSources = filepath.Join("/tmp", s.c.BackupSources)
		// copy before compressing guard against a situation where backup folder's content are still growing.
		s.registerHook(hookLevelPlumbing, func(error) error {
			if err := remove(backupSources); err != nil {
				return errwrap.Wrap(err, "error removing snapshot")
			}
			s.logger.Info(
				fmt.Sprintf("Removed snapshot `%s`.", backupSources),
			)
			return nil
		})
		if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
			PreserveTimes: true,
			PreserveOwner: true,
		}); err != nil {
			return errwrap.Wrap(err, "error creating snapshot")
		}
		s.logger.Info(
			fmt.Sprintf("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources),
		)
	}

	tarFile := s.file
	s.registerHook(hookLevelPlumbing, func(error) error {
		if err := remove(tarFile); err != nil {
			return errwrap.Wrap(err, "error removing tar file")
		}
		s.logger.Info(
			fmt.Sprintf("Removed tar file `%s`.", tarFile),
		)
		return nil
	})

	backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
	if err != nil {
		return errwrap.Wrap(err, "error getting absolute path")
	}

	var filesEligibleForBackup []string
	if err := filepath.WalkDir(backupPath, func(path string, di fs.DirEntry, err error) error {
		if err != nil {
			return err
		}

		if s.c.BackupExcludeRegexp.Re != nil && s.c.BackupExcludeRegexp.Re.MatchString(path) {
			return nil
		}
		filesEligibleForBackup = append(filesEligibleForBackup, path)
		return nil
	}); err != nil {
		return errwrap.Wrap(err, "error walking filesystem tree")
	}

	if err := createArchive(filesEligibleForBackup, backupSources, tarFile, s.c.BackupCompression.String(), s.c.GzipParallelism.Int()); err != nil {
		return errwrap.Wrap(err, "error compressing backup folder")
	}

	s.logger.Info(
		fmt.Sprintf("Created backup of `%s` at `%s`.", backupSources, tarFile),
	)
	return nil
}
```
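The walk in `createArchive` collects every path below the backup source and drops anything matching `BackupExcludeRegexp`; a matching directory is skipped as an entry but still descended into. The filtering step in a self-contained sketch (root path and pattern are examples):

```go
// Self-contained sketch of the exclude-regexp walk used in createArchive.
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"regexp"
)

func collectEligible(root string, exclude *regexp.Regexp) ([]string, error) {
	var eligible []string
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if exclude != nil && exclude.MatchString(path) {
			return nil // skip this entry, but keep walking into it
		}
		eligible = append(eligible, path)
		return nil
	})
	return eligible, err
}

func main() {
	// Example: exclude anything whose path contains node_modules.
	files, err := collectEligible("/backup", regexp.MustCompile(`node_modules`))
	fmt.Println(files, err)
}
```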
cmd/backup/encrypt_archive.go (new file, 226 lines)

```go
// Copyright 2024 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"path"
	"strings"

	"filippo.io/age"
	"filippo.io/age/agessh"
	"github.com/ProtonMail/go-crypto/openpgp/armor"
	openpgp "github.com/ProtonMail/go-crypto/openpgp/v2"
	"github.com/offen/docker-volume-backup/internal/errwrap"
)

func countTrue(b ...bool) int {
	c := int(0)
	for _, v := range b {
		if v {
			c++
		}
	}
	return c
}

// encryptArchive encrypts the backup file using PGP and the configured passphrase or publickey(s).
// In case no passphrase or publickey is given it returns early, leaving the backup file
// untouched.
func (s *script) encryptArchive() error {
	useGPGSymmetric := s.c.GpgPassphrase != ""
	useGPGAsymmetric := s.c.GpgPublicKeyRing != ""
	useAgeSymmetric := s.c.AgePassphrase != ""
	useAgeAsymmetric := len(s.c.AgePublicKeys) > 0
	switch nconfigured := countTrue(
		useGPGSymmetric,
		useGPGAsymmetric,
		useAgeSymmetric,
		useAgeAsymmetric,
	); nconfigured {
	case 0:
		return nil
	case 1:
		// ok!
	default:
		return fmt.Errorf(
			"error in selecting archive encryption method: expected 0 or 1 to be configured, %d methods are configured",
			nconfigured,
		)
	}

	if useGPGSymmetric {
		return s.encryptWithGPGSymmetric()
	} else if useGPGAsymmetric {
		return s.encryptWithGPGAsymmetric()
	} else if useAgeSymmetric || useAgeAsymmetric {
		ar, err := s.getConfiguredAgeRecipients()
		if err != nil {
			return errwrap.Wrap(err, "failed to get configured age recipients")
		}
		return s.encryptWithAge(ar)
	}
	return nil
}

func (s *script) getConfiguredAgeRecipients() ([]age.Recipient, error) {
	if s.c.AgePassphrase == "" && len(s.c.AgePublicKeys) == 0 {
		return nil, fmt.Errorf("no age recipients configured")
	}
	recipients := []age.Recipient{}
	if len(s.c.AgePublicKeys) > 0 {
		for _, pk := range s.c.AgePublicKeys {
			pkr, err := parseAgeRecipient(pk)
			if err != nil {
				return nil, errwrap.Wrap(err, "failed to parse age public key")
			}
			recipients = append(recipients, pkr)
		}
	}
	if s.c.AgePassphrase != "" {
		if len(recipients) != 0 {
			return nil, fmt.Errorf("age encryption must only be enabled via passphrase or public key, not both")
		}

		r, err := age.NewScryptRecipient(s.c.AgePassphrase)
		if err != nil {
			return nil, errwrap.Wrap(err, "failed to create scrypt identity from age passphrase")
		}
		recipients = append(recipients, r)
	}
	return recipients, nil
}

func parseAgeRecipient(arg string) (age.Recipient, error) {
	// This logic is adapted from what the age CLI is doing
	// stripping some special cases
	switch {
	case strings.HasPrefix(arg, "age1"):
		return age.ParseX25519Recipient(arg)
	case strings.HasPrefix(arg, "ssh-"):
		return agessh.ParseRecipient(arg)
	}
	return nil, fmt.Errorf("unknown recipient type: %q", arg)
}

func (s *script) encryptWithAge(rec []age.Recipient) error {
	return s.doEncrypt("age", func(ciphertextWriter io.Writer) (io.WriteCloser, error) {
		return age.Encrypt(ciphertextWriter, rec...)
	})
}

func (s *script) encryptWithGPGSymmetric() error {
	return s.doEncrypt("gpg", func(ciphertextWriter io.Writer) (io.WriteCloser, error) {
		_, name := path.Split(s.file)
		return openpgp.SymmetricallyEncrypt(ciphertextWriter, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
			FileName: name,
		}, nil)
	})
}

type closeAllWriter struct {
	io.Writer
	closers []io.Closer
}

func (c *closeAllWriter) Close() (err error) {
	for _, cl := range c.closers {
		err = errors.Join(err, cl.Close())
	}
	return
}

var _ io.WriteCloser = (*closeAllWriter)(nil)

func (s *script) encryptWithGPGAsymmetric() error {
	return s.doEncrypt("gpg", func(ciphertextWriter io.Writer) (_ io.WriteCloser, outerr error) {
		entityList, err := openpgp.ReadArmoredKeyRing(bytes.NewReader([]byte(s.c.GpgPublicKeyRing)))
		if err != nil {
			return nil, errwrap.Wrap(err, "error parsing armored keyring")
		}

		armoredWriter, err := armor.Encode(ciphertextWriter, "PGP MESSAGE", nil)
		if err != nil {
			return nil, errwrap.Wrap(err, "error preparing encryption")
		}
		defer func() {
			if outerr != nil {
				_ = armoredWriter.Close()
			}
		}()

		_, name := path.Split(s.file)
		encWriter, err := openpgp.Encrypt(armoredWriter, entityList, nil, nil, &openpgp.FileHints{
			FileName: name,
		}, nil)
		if err != nil {
			return nil, err
		}
		return &closeAllWriter{
			Writer:  encWriter,
			closers: []io.Closer{encWriter, armoredWriter},
		}, nil
	})
}

func (s *script) doEncrypt(
	extension string,
	encryptor func(ciphertextWriter io.Writer) (io.WriteCloser, error),
) (outerr error) {
	encFile := fmt.Sprintf("%s.%s", s.file, extension)
	s.registerHook(hookLevelPlumbing, func(error) error {
		if err := remove(encFile); err != nil {
			return errwrap.Wrap(err, "error removing encrypted file")
		}
		s.logger.Info(
			fmt.Sprintf("Removed encrypted file `%s`.", encFile),
		)
		return nil
	})

	outFile, err := os.Create(encFile)
	if err != nil {
		return errwrap.Wrap(err, "error opening out file")
	}
	defer func() {
		if err := outFile.Close(); err != nil {
			outerr = errors.Join(outerr, errwrap.Wrap(err, "error closing out file"))
		}
	}()

	dst, err := encryptor(outFile)
	if err != nil {
		return errwrap.Wrap(err, "error encrypting backup file")
	}
	defer func() {
		if err := dst.Close(); err != nil {
			outerr = errors.Join(outerr, errwrap.Wrap(err, "error closing encrypted backup file"))
		}
	}()

	src, err := os.Open(s.file)
	if err != nil {
		return errwrap.Wrap(err, fmt.Sprintf("error opening backup file %q", s.file))
	}
	defer func() {
		if err := src.Close(); err != nil {
			outerr = errors.Join(outerr, errwrap.Wrap(err, "error closing backup file"))
		}
	}()

	if _, err := io.Copy(dst, src); err != nil {
		return errwrap.Wrap(err, "error writing ciphertext to file")
	}

	s.file = encFile
	s.logger.Info(
		fmt.Sprintf("Encrypted backup using %q, saving as %q", extension, s.file),
	)

	return
}
```
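Archives encrypted via the age code path can be restored with the age CLI or library. A sketch of the passphrase-based decryption counterpart using `filippo.io/age` (file names and the passphrase are examples, not values produced by the tool):

```go
// Hypothetical decryption counterpart for a passphrase-encrypted backup;
// file names are examples only.
package main

import (
	"io"
	"log"
	"os"

	"filippo.io/age"
)

func main() {
	in, err := os.Open("backup.tar.gz.age")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	// NewScryptIdentity mirrors the NewScryptRecipient call used when encrypting.
	identity, err := age.NewScryptIdentity("my-passphrase")
	if err != nil {
		log.Fatal(err)
	}

	// age.Decrypt returns a reader over the plaintext.
	plaintext, err := age.Decrypt(in, identity)
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create("backup.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, plaintext); err != nil {
		log.Fatal(err)
	}
}
```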
@@ -1,4 +1,4 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

// Portions of this file are taken and adapted from `moby`, Copyright 2012-2017 Docker, Inc.
@@ -9,24 +9,34 @@ package main
import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/cosiner/argv"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/pkg/stdcopy"
	"github.com/offen/docker-volume-backup/internal/errwrap"
	"golang.org/x/sync/errgroup"
)

func (s *script) exec(containerRef string, command string, user string) ([]byte, []byte, error) {
	args, _ := argv.Argv(command, nil, nil)
	args, err := argv.Argv(command, nil, nil)
	if err != nil {
		return nil, nil, errwrap.Wrap(err, fmt.Sprintf("error parsing argv from '%s'", command))
	}
	if len(args) == 0 {
		return nil, nil, errwrap.Wrap(nil, "received unexpected empty command")
	}

	commandEnv := []string{
		fmt.Sprintf("COMMAND_RUNTIME_ARCHIVE_FILEPATH=%s", s.file),
	}
	execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, types.ExecConfig{

	execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, container.ExecOptions{
		Cmd:          args[0],
		AttachStdin:  true,
		AttachStderr: true,
@@ -34,43 +44,51 @@ func (s *script) exec(containerRef string, command string, user string) ([]byte,
		User: user,
	})
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error creating container exec: %w", err)
		return nil, nil, errwrap.Wrap(err, "error creating container exec")
	}

	resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, types.ExecStartCheck{})
	resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, container.ExecStartOptions{})
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error attaching container exec: %w", err)
		return nil, nil, errwrap.Wrap(err, "error attaching container exec")
	}
	defer resp.Close()

	var outBuf, errBuf bytes.Buffer
	var outBuf, errBuf, fullRespBuf bytes.Buffer
	outputDone := make(chan error)

	tee := io.TeeReader(resp.Reader, &fullRespBuf)

	go func() {
		_, err := stdcopy.StdCopy(&outBuf, &errBuf, resp.Reader)
		_, err := stdcopy.StdCopy(&outBuf, &errBuf, tee)
		outputDone <- err
	}()

	if err := <-outputDone; err != nil {
		return nil, nil, fmt.Errorf("exec: error demultiplexing output: %w", err)
		if body, bErr := io.ReadAll(&fullRespBuf); bErr == nil {
			// if possible, try to append the exec output to the error
			// as it's likely to be more relevant for users than the error from
			// calling stdcopy.Copy
			err = errwrap.Wrap(errors.New(string(body)), err.Error())
		}
		return nil, nil, errwrap.Wrap(err, "error demultiplexing output")
	}

	stdout, err := io.ReadAll(&outBuf)
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error reading stdout: %w", err)
		return nil, nil, errwrap.Wrap(err, "error reading stdout")
	}
	stderr, err := io.ReadAll(&errBuf)
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error reading stderr: %w", err)
		return nil, nil, errwrap.Wrap(err, "error reading stderr")
	}

	res, err := s.cli.ContainerExecInspect(context.Background(), execID.ID)
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error inspecting container exec: %w", err)
		return nil, nil, errwrap.Wrap(err, "error inspecting container exec")
	}

	if res.ExitCode > 0 {
		return stdout, stderr, fmt.Errorf("exec: running command exited %d", res.ExitCode)
		return stdout, stderr, errwrap.Wrap(nil, fmt.Sprintf("running command exited %d", res.ExitCode))
	}

	return stdout, stderr, nil
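Aside: the io.TeeReader change in the hunk above is easy to miss. Everything stdcopy.StdCopy consumes is mirrored into fullRespBuf, so a demultiplexing failure can report the raw payload instead of only the decoder error. A minimal standalone sketch of that capture pattern (illustrative names, not the project's code; demux stands in for stdcopy.StdCopy):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// demuxWithCapture retains every byte the demultiplexer consumes, so a
// failure can surface the raw payload alongside the decoder error.
func demuxWithCapture(r io.Reader, demux func(io.Reader) error) ([]byte, error) {
	var full bytes.Buffer
	tee := io.TeeReader(r, &full) // copies everything demux reads into full
	if err := demux(tee); err != nil {
		return full.Bytes(), fmt.Errorf("%s: %w", full.String(), err)
	}
	return full.Bytes(), nil
}

func main() {
	raw, err := demuxWithCapture(
		bytes.NewReader([]byte("not a valid stream")),
		func(r io.Reader) error {
			if _, cerr := io.Copy(io.Discard, r); cerr != nil {
				return cerr
			}
			return errors.New("unexpected frame header")
		},
	)
	fmt.Printf("raw=%q err=%v\n", raw, err)
}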
@@ -86,11 +104,11 @@ func (s *script) runLabeledCommands(label string) error {
			Value: fmt.Sprintf("docker-volume-backup.exec-label=%s", s.c.ExecLabel),
		})
	}
	containersWithCommand, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
	containersWithCommand, err := s.cli.ContainerList(context.Background(), container.ListOptions{
		Filters: filters.NewArgs(f...),
	})
	if err != nil {
		return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
		return errwrap.Wrap(err, "error querying for containers")
	}

	var hasDeprecatedContainers bool
@@ -99,11 +117,11 @@ func (s *script) runLabeledCommands(label string) error {
		Key:   "label",
		Value: "docker-volume-backup.exec-pre",
	}
	deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
	deprecatedContainers, err := s.cli.ContainerList(context.Background(), container.ListOptions{
		Filters: filters.NewArgs(f...),
	})
	if err != nil {
		return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
		return errwrap.Wrap(err, "error querying for containers")
	}
	if len(deprecatedContainers) != 0 {
		hasDeprecatedContainers = true
@@ -116,11 +134,11 @@ func (s *script) runLabeledCommands(label string) error {
		Key:   "label",
		Value: "docker-volume-backup.exec-post",
	}
	deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
	deprecatedContainers, err := s.cli.ContainerList(context.Background(), container.ListOptions{
		Filters: filters.NewArgs(f...),
	})
	if err != nil {
		return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
		return errwrap.Wrap(err, "error querying for containers")
	}
	if len(deprecatedContainers) != 0 {
		hasDeprecatedContainers = true
@@ -159,18 +177,22 @@ func (s *script) runLabeledCommands(label string) error {
			s.logger.Info(fmt.Sprintf("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/")))
			stdout, stderr, err := s.exec(c.ID, cmd, user)
			if s.c.ExecForwardOutput {
				os.Stderr.Write(stderr)
				os.Stdout.Write(stdout)
				if _, err := os.Stderr.Write(stderr); err != nil {
					return errwrap.Wrap(err, "error writing to stderr")
				}
				if _, err := os.Stdout.Write(stdout); err != nil {
					return errwrap.Wrap(err, "error writing to stdout")
				}
			}
			if err != nil {
				return fmt.Errorf("runLabeledCommands: error executing command: %w", err)
				return errwrap.Wrap(err, "error executing command")
			}
			return nil
		})
	}

	if err := g.Wait(); err != nil {
		return fmt.Errorf("runLabeledCommands: error from errgroup: %w", err)
		return errwrap.Wrap(err, "error from errgroup")
	}
	return nil
}
@@ -188,13 +210,17 @@ func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func(
	if s.cli == nil {
		return cb
	}
	return func() error {
		if err := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
			return fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
	return func() (err error) {
		if err = s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
			err = errwrap.Wrap(err, fmt.Sprintf("error running %s-pre commands", step))
			return
		}
		defer func() {
			s.must(s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)))
			if derr := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)); derr != nil {
				err = errors.Join(err, errwrap.Wrap(derr, fmt.Sprintf("error running %s-post commands", step)))
			}
		}()
		return cb()
		err = cb()
		return
	}
}
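Aside: the switch to a named return value in this hunk is what lets the deferred post-command run report its failure instead of panicking via s.must. The pattern in isolation (illustrative names, not the project's code):

package main

import (
	"errors"
	"fmt"
)

// runWithCleanup joins a failing cleanup's error onto whatever the body
// returned, so neither error is swallowed.
func runWithCleanup(body func() error, cleanup func() error) (err error) {
	defer func() {
		if derr := cleanup(); derr != nil {
			err = errors.Join(err, fmt.Errorf("cleanup failed: %w", derr))
		}
	}()
	// the deferred function runs after err is assigned and may amend it
	return body()
}

func main() {
	err := runWithCleanup(
		func() error { return errors.New("body failed") },
		func() error { return errors.New("post hook failed") },
	)
	fmt.Println(err) // both errors surface
}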
@@ -1,12 +1,13 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"errors"
	"fmt"
	"sort"

	"github.com/offen/docker-volume-backup/internal/errwrap"
)

// hook contains a queued action that can be trigger them when the script
@@ -47,7 +48,7 @@ func (s *script) runHooks(err error) error {
			continue
		}
		if actionErr := hook.action(err); actionErr != nil {
			actionErrors = append(actionErrors, fmt.Errorf("runHooks: error running hook: %w", actionErr))
			actionErrors = append(actionErrors, errwrap.Wrap(actionErr, "error running hook"))
		}
	}
	if len(actionErrors) != 0 {
@@ -1,14 +1,14 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/gofrs/flock"
	"github.com/offen/docker-volume-backup/internal/errwrap"
)

// lock opens a lockfile at the given location, keeping it locked until the
@@ -31,7 +31,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
	for {
		acquired, err := fileLock.TryLock()
		if err != nil {
			return noop, fmt.Errorf("lock: error trying to lock: %w", err)
			return noop, errwrap.Wrap(err, "error trying to lock")
		}
		if acquired {
			if s.encounteredLock {
@@ -54,7 +54,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
		case <-retry.C:
			continue
		case <-deadline.C:
			return noop, errors.New("lock: timed out waiting for lockfile to become available")
			return noop, errwrap.Wrap(nil, "timed out waiting for lockfile to become available")
		}
	}
}
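Aside: the surrounding function polls a gofrs/flock lock non-blockingly rather than waiting on it. A self-contained sketch of that try/retry/deadline loop, with intervals chosen arbitrarily here (the project reads its own configuration):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/gofrs/flock"
)

// acquireWithTimeout tries the lock, and while it is held elsewhere retries
// on a ticker until a deadline fires.
func acquireWithTimeout(path string, timeout time.Duration) (release func() error, err error) {
	fl := flock.New(path)
	retry := time.NewTicker(time.Second)
	defer retry.Stop()
	deadline := time.NewTimer(timeout)
	defer deadline.Stop()
	for {
		acquired, err := fl.TryLock() // non-blocking attempt
		if err != nil {
			return nil, err
		}
		if acquired {
			return fl.Unlock, nil
		}
		select {
		case <-retry.C:
			continue
		case <-deadline.C:
			return nil, errors.New("timed out waiting for lockfile")
		}
	}
}

func main() {
	release, err := acquireWithTimeout("/tmp/example.lock", 5*time.Second)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer release()
	fmt.Println("lock held")
}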
@@ -1,68 +1,24 @@
// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2021-2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"fmt"
	"os"
	"flag"
)

func main() {
	s, err := newScript()
	if err != nil {
		panic(err)
	}
	foreground := flag.Bool("foreground", false, "run the tool in the foreground")
	profile := flag.String("profile", "", "collect runtime metrics and log them periodically on the given cron expression")
	flag.Parse()

	unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
	defer func() {
		s.must(unlock())
	}()
	s.must(err)

	defer func() {
		if pArg := recover(); pArg != nil {
			if err, ok := pArg.(error); ok {
				s.logger.Error(
					fmt.Sprintf("Executing the script encountered a panic: %v", err),
				)
				if hookErr := s.runHooks(err); hookErr != nil {
					s.logger.Error(
						fmt.Sprintf("An error occurred calling the registered hooks: %s", hookErr),
					)
	c := newCommand()
	if *foreground {
		opts := foregroundOpts{
			profileCronExpression: *profile,
		}
				os.Exit(1)
		c.must(c.runInForeground(opts))
	} else {
		c.must(c.runAsCommand())
	}
			panic(pArg)
		}

		if err := s.runHooks(nil); err != nil {
			s.logger.Error(
				fmt.Sprintf(
					"Backup procedure ran successfully, but an error ocurred calling the registered hooks: %v",
					err,
				),
			)
			os.Exit(1)
		}
		s.logger.Info("Finished running backup tasks.")
	}()

	s.must(s.withLabeledCommands(lifecyclePhaseArchive, func() error {
		restartContainersAndServices, err := s.stopContainersAndServices()
		// The mechanism for restarting containers is not using hooks as it
		// should happen as soon as possible (i.e. before uploading backups or
		// similar).
		defer func() {
			s.must(restartContainersAndServices())
		}()
		if err != nil {
			return err
		}
		return s.createArchive()
	})())

	s.must(s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)())
	s.must(s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)())
	s.must(s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)())
}
@@ -1,4 +1,4 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main
@@ -13,7 +13,8 @@ import (
	"text/template"
	"time"

	sTypes "github.com/containrrr/shoutrrr/pkg/types"
	sTypes "github.com/nicholas-fedor/shoutrrr/pkg/types"
	"github.com/offen/docker-volume-backup/internal/errwrap"
)

//go:embed notifications.tmpl
@@ -37,16 +38,16 @@ func (s *script) notify(titleTemplate string, bodyTemplate string, err error) er

	titleBuf := &bytes.Buffer{}
	if err := s.template.ExecuteTemplate(titleBuf, titleTemplate, params); err != nil {
		return fmt.Errorf("notify: error executing %s template: %w", titleTemplate, err)
		return errwrap.Wrap(err, fmt.Sprintf("error executing %s template", titleTemplate))
	}

	bodyBuf := &bytes.Buffer{}
	if err := s.template.ExecuteTemplate(bodyBuf, bodyTemplate, params); err != nil {
		return fmt.Errorf("notify: error executing %s template: %w", bodyTemplate, err)
		return errwrap.Wrap(err, fmt.Sprintf("error executing %s template", bodyTemplate))
	}

	if err := s.sendNotification(titleBuf.String(), bodyBuf.String()); err != nil {
		return fmt.Errorf("notify: error notifying: %w", err)
		return errwrap.Wrap(err, "error sending notification")
	}
	return nil
}
@@ -70,7 +71,7 @@ func (s *script) sendNotification(title, body string) error {
		}
	}
	if len(errs) != 0 {
		return fmt.Errorf("sendNotification: error sending message: %w", errors.Join(errs...))
		return errwrap.Wrap(errors.Join(errs...), "error sending message")
	}
	return nil
}
cmd/backup/profile.go (new file, 24 lines)
@@ -0,0 +1,24 @@
// Copyright 2024 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import "runtime"

func (c *command) profile() {
	memStats := runtime.MemStats{}
	runtime.ReadMemStats(&memStats)
	c.logger.Info(
		"Collecting runtime information",
		"num_goroutines",
		runtime.NumGoroutine(),
		"memory_heap_alloc",
		formatBytes(memStats.HeapAlloc, false),
		"memory_heap_inuse",
		formatBytes(memStats.HeapInuse, false),
		"memory_heap_sys",
		formatBytes(memStats.HeapSys, false),
		"memory_heap_objects",
		memStats.HeapObjects,
	)
}
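Aside: c.logger here is a log/slog logger, so the call above passes alternating keys and values. A standalone equivalent (plain integers instead of the project's formatBytes helper, to stay self-contained):

package main

import (
	"log/slog"
	"os"
	"runtime"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	// slog takes alternating key/value arguments, exactly as in profile()
	logger.Info(
		"Collecting runtime information",
		"num_goroutines", runtime.NumGoroutine(),
		"memory_heap_alloc", m.HeapAlloc,
		"memory_heap_objects", m.HeapObjects,
	)
}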
cmd/backup/prune_backups.go (new file, 66 lines)
@@ -0,0 +1,66 @@
// Copyright 2024 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"fmt"
	"slices"
	"strings"
	"time"

	"github.com/offen/docker-volume-backup/internal/errwrap"
	"golang.org/x/sync/errgroup"
)

// pruneBackups rotates away backups from local and remote storages using
// the given configuration. In case the given configuration would delete all
// backups, it does nothing instead and logs a warning.
func (s *script) pruneBackups() error {
	if s.c.BackupRetentionDays < 0 {
		return nil
	}

	deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)

	eg := errgroup.Group{}
	for _, backend := range s.storages {
		b := backend
		eg.Go(func() error {
			if skipPrune(b.Name(), s.c.BackupSkipBackendsFromPrune) {
				s.logger.Info(
					fmt.Sprintf("Skipping pruning for backend `%s`.", b.Name()),
				)
				return nil
			}
			stats, err := b.Prune(deadline, s.c.BackupPruningPrefix)
			if err != nil {
				return err
			}
			s.stats.Lock()
			s.stats.Storages[b.Name()] = StorageStats{
				Total:  stats.Total,
				Pruned: stats.Pruned,
			}
			s.stats.Unlock()
			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return errwrap.Wrap(err, "error pruning backups")
	}

	return nil
}

// skipPrune returns true if the given backend name is contained in the
// list of skipped backends.
func skipPrune(name string, skippedBackends []string) bool {
	return slices.ContainsFunc(
		skippedBackends,
		func(b string) bool {
			return strings.EqualFold(b, name) // ignore case on both sides
		},
	)
}
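Aside: the case-insensitive matching in skipPrune means user-configured backend names like "s3", "S3" or "WebDAV" all behave the same. Shown in isolation:

package main

import (
	"fmt"
	"slices"
	"strings"
)

// skip reports whether name appears in skipped, ignoring case on both sides.
func skip(name string, skipped []string) bool {
	return slices.ContainsFunc(skipped, func(b string) bool {
		return strings.EqualFold(b, name)
	})
}

func main() {
	fmt.Println(skip("S3", []string{"s3", "webdav"})) // true
	fmt.Println(skip("Local", []string{"s3"}))        // false
}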
cmd/backup/run_script.go (new file, 124 lines)
@@ -0,0 +1,124 @@
// Copyright 2024 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"runtime/debug"
	"time"

	"github.com/offen/docker-volume-backup/internal/errwrap"
)

// runScript instantiates a new script object and orchestrates a backup run.
// To ensure it runs mutually exclusive a global file lock is acquired before
// it starts running. Any panic within the script will be recovered and returned
// as an error.
func runScript(c *Config) (err error) {
	defer func() {
		if derr := recover(); derr != nil {
			fmt.Printf("%s: %s\n", derr, debug.Stack())
			asErr, ok := derr.(error)
			if ok {
				err = errwrap.Wrap(asErr, "unexpected panic running script")
			} else {
				err = errwrap.Wrap(nil, fmt.Sprintf("%v", derr))
			}
		}
	}()

	s := newScript(c)

	unlock, lockErr := s.lock("/var/lock/dockervolumebackup.lock")
	if lockErr != nil {
		err = errwrap.Wrap(lockErr, "error acquiring file lock")
		return
	}
	defer func() {
		if derr := unlock(); derr != nil {
			err = errors.Join(err, errwrap.Wrap(derr, "error releasing file lock"))
		}
	}()

	unset, err := s.c.applyEnv()
	if err != nil {
		return errwrap.Wrap(err, "error applying env")
	}
	defer func() {
		if derr := unset(); derr != nil {
			err = errors.Join(err, errwrap.Wrap(derr, "error unsetting environment variables"))
		}
	}()

	if s.c != nil && s.c.BackupJitter > 0 {
		max := s.c.BackupJitter
		delay := time.Duration(rand.Int63n(int64(max) + 1))
		if delay > 0 {
			s.logger.Info(fmt.Sprintf("Applying startup jitter of %v", delay))
			time.Sleep(delay)
		}
	}

	if initErr := s.init(); initErr != nil {
		err = errwrap.Wrap(initErr, "error instantiating script")
		return
	}

	return func() (err error) {
		scriptErr := func() error {
			if err := s.withLabeledCommands(lifecyclePhaseArchive, func() (err error) {
				restartContainersAndServices, err := s.stopContainersAndServices()
				// The mechanism for restarting containers is not using hooks as it
				// should happen as soon as possible (i.e. before uploading backups or
				// similar).
				defer func() {
					if derr := restartContainersAndServices(); derr != nil {
						err = errors.Join(err, errwrap.Wrap(derr, "error restarting containers and services"))
					}
				}()
				if err != nil {
					return
				}
				err = s.createArchive()
				return
			})(); err != nil {
				return err
			}

			if err := s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)(); err != nil {
				return err
			}
			if err := s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)(); err != nil {
				return err
			}
			if err := s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)(); err != nil {
				return err
			}
			return nil
		}()

		if hookErr := s.runHooks(scriptErr); hookErr != nil {
			if scriptErr != nil {
				return errwrap.Wrap(
					nil,
					fmt.Sprintf(
						"error %v executing the script followed by %v calling the registered hooks",
						scriptErr,
						hookErr,
					),
				)
			}
			return errwrap.Wrap(
				hookErr,
				"the script ran successfully, but an error occurred calling the registered hooks",
			)
		}
		if scriptErr != nil {
			return errwrap.Wrap(scriptErr, "error running script")
		}
		return nil
	}()
}
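Aside: the deferred recover at the top of runScript converts any panic into an ordinary returned error, so the scheduler driving it never sees the process die mid-run. The pattern reduced to its core (illustrative wrapper, not the project's code):

package main

import (
	"errors"
	"fmt"
	"runtime/debug"
)

// safeRun converts a panic anywhere in body into a regular error via the
// named return value.
func safeRun(body func() error) (err error) {
	defer func() {
		if derr := recover(); derr != nil {
			fmt.Printf("%s: %s\n", derr, debug.Stack())
			if asErr, ok := derr.(error); ok {
				err = fmt.Errorf("unexpected panic: %w", asErr)
			} else {
				err = fmt.Errorf("unexpected panic: %v", derr)
			}
		}
	}()
	return body()
}

func main() {
	err := safeRun(func() error { panic(errors.New("boom")) })
	fmt.Println(err)
}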
@@ -1,4 +1,4 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main
@@ -6,33 +6,26 @@ package main
import (
	"bytes"
	"fmt"
	"io"
	"io/fs"
	"log/slog"
	"os"
	"path"
	"path/filepath"
	"slices"
	"strings"
	"text/template"
	"time"

	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
	"github.com/offen/docker-volume-backup/internal/storage/azure"
	"github.com/offen/docker-volume-backup/internal/storage/dropbox"
	"github.com/offen/docker-volume-backup/internal/storage/googledrive"
	"github.com/offen/docker-volume-backup/internal/storage/local"
	"github.com/offen/docker-volume-backup/internal/storage/s3"
	"github.com/offen/docker-volume-backup/internal/storage/ssh"
	"github.com/offen/docker-volume-backup/internal/storage/webdav"

	openpgp "github.com/ProtonMail/go-crypto/openpgp/v2"
	"github.com/containrrr/shoutrrr"
	"github.com/containrrr/shoutrrr/pkg/router"
	"github.com/docker/docker/client"
	"github.com/leekchan/timeutil"
	"github.com/offen/envconfig"
	"github.com/otiai10/copy"
	"golang.org/x/sync/errgroup"
	"github.com/nicholas-fedor/shoutrrr"
	"github.com/nicholas-fedor/shoutrrr/pkg/router"
)

// script holds all the stateful information required to orchestrate a
@@ -58,10 +51,10 @@ type script struct {
// remote resources like the Docker engine or remote storage locations. All
// reading from env vars or other configuration sources is expected to happen
// in this method.
func newScript() (*script, error) {
func newScript(c *Config) *script {
	stdOut, logBuffer := buffer(os.Stdout)
	s := &script{
		c: &Config{},
	return &script{
		c:      c,
		logger: slog.New(slog.NewTextHandler(stdOut, nil)),
		stats: &Stats{
			StartTime: time.Now(),
@@ -73,54 +66,36 @@ func newScript() (*script, error) {
				"Local":       {},
				"Azure":       {},
				"Dropbox":     {},
				"GoogleDrive": {},
			},
		},
	}
}

func (s *script) init() error {
	s.registerHook(hookLevelPlumbing, func(error) error {
		s.stats.EndTime = time.Now()
		s.stats.TookTime = s.stats.EndTime.Sub(s.stats.StartTime)
		return nil
	})

	envconfig.Lookup = func(key string) (string, bool) {
		value, okValue := os.LookupEnv(key)
		location, okFile := os.LookupEnv(key + "_FILE")

		switch {
		case okValue && !okFile: // only value
			return value, true
		case !okValue && okFile: // only file
			contents, err := os.ReadFile(location)
			if err != nil {
				s.must(fmt.Errorf("newScript: failed to read %s! Error: %s", location, err))
				return "", false
			}
			return string(contents), true
		case okValue && okFile: // both
			s.must(fmt.Errorf("newScript: both %s and %s are set!", key, key+"_FILE"))
			return "", false
		default: // neither, ignore
			return "", false
		}
	}

	if err := envconfig.Process("", s.c); err != nil {
		return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
	}

	s.file = path.Join("/tmp", s.c.BackupFilename)

	tmplFileName, tErr := template.New("extension").Parse(s.file)
	if tErr != nil {
		return nil, fmt.Errorf("newScript: unable to parse backup file extension template: %w", tErr)
		return errwrap.Wrap(tErr, "unable to parse backup file extension template")
	}

	var bf bytes.Buffer
	if tErr := tmplFileName.Execute(&bf, map[string]string{
		"Extension": fmt.Sprintf("tar.%s", s.c.BackupCompression),
		"Extension": func() string {
			if s.c.BackupCompression == "none" {
				return "tar"
			}
			return fmt.Sprintf("tar.%s", s.c.BackupCompression)
		}(),
	}); tErr != nil {
		return nil, fmt.Errorf("newScript: error executing backup file extension template: %w", tErr)
		return errwrap.Wrap(tErr, "error executing backup file extension template")
	}
	s.file = bf.String()
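Aside: the new function literal above resolves the {{ .Extension }} template differently when compression is disabled, so "none" yields a bare .tar name instead of tar.none. A reduced, runnable version of that decision under assumed config values:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// extensionFor picks the archive extension at template-execution time.
func extensionFor(compression string) string {
	if compression == "none" {
		return "tar"
	}
	return fmt.Sprintf("tar.%s", compression)
}

func main() {
	tmpl := template.Must(template.New("extension").Parse("backup.{{ .Extension }}"))
	for _, c := range []string{"gz", "zst", "none"} {
		var bf bytes.Buffer
		if err := tmpl.Execute(&bf, map[string]string{"Extension": extensionFor(c)}); err != nil {
			panic(err)
		}
		fmt.Println(bf.String()) // backup.tar.gz, backup.tar.zst, backup.tar
	}
}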
@@ -136,17 +111,21 @@ func newScript() (*script, error) {
	if !os.IsNotExist(err) || dockerHostSet {
		cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
		if err != nil {
			return nil, fmt.Errorf("newScript: failed to create docker client")
			return errwrap.Wrap(err, "failed to create docker client")
		}
		s.cli = cli
		s.registerHook(hookLevelPlumbing, func(err error) error {
			if err := s.cli.Close(); err != nil {
				return errwrap.Wrap(err, "failed to close docker client")
			}
			return nil
		})
	}

	logFunc := func(logType storage.LogLevel, context string, msg string, params ...any) {
		switch logType {
		case storage.LogLevelWarning:
			s.logger.Warn(fmt.Sprintf(msg, params...), "storage", context)
		case storage.LogLevelError:
			s.logger.Error(fmt.Sprintf(msg, params...), "storage", context)
		default:
			s.logger.Info(fmt.Sprintf(msg, params...), "storage", context)
		}
@@ -168,7 +147,7 @@ func newScript() (*script, error) {
		}
		s3Backend, err := s3.NewStorageBackend(s3Config, logFunc)
		if err != nil {
			return nil, fmt.Errorf("newScript: error creating s3 storage backend: %w", err)
			return errwrap.Wrap(err, "error creating s3 storage backend")
		}
		s.storages = append(s.storages, s3Backend)
	}
@@ -183,7 +162,7 @@ func newScript() (*script, error) {
		}
		webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc)
		if err != nil {
			return nil, fmt.Errorf("newScript: error creating webdav storage backend: %w", err)
			return errwrap.Wrap(err, "error creating webdav storage backend")
		}
		s.storages = append(s.storages, webdavBackend)
	}
@@ -200,7 +179,7 @@ func newScript() (*script, error) {
		}
		sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc)
		if err != nil {
			return nil, fmt.Errorf("newScript: error creating ssh storage backend: %w", err)
			return errwrap.Wrap(err, "error creating ssh storage backend")
		}
		s.storages = append(s.storages, sshBackend)
	}
@@ -221,10 +200,12 @@ func newScript() (*script, error) {
			PrimaryAccountKey: s.c.AzureStoragePrimaryAccountKey,
			Endpoint:          s.c.AzureStorageEndpoint,
			RemotePath:        s.c.AzureStoragePath,
			ConnectionString:  s.c.AzureStorageConnectionString,
			AccessTier:        s.c.AzureStorageAccessTier,
		}
		azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
		if err != nil {
			return nil, fmt.Errorf("newScript: error creating azure storage backend: %w", err)
			return errwrap.Wrap(err, "error creating azure storage backend")
		}
		s.storages = append(s.storages, azureBackend)
	}
@@ -241,11 +222,26 @@ func newScript() (*script, error) {
		}
		dropboxBackend, err := dropbox.NewStorageBackend(dropboxConfig, logFunc)
		if err != nil {
			return nil, fmt.Errorf("newScript: error creating dropbox storage backend: %w", err)
			return errwrap.Wrap(err, "error creating dropbox storage backend")
		}
		s.storages = append(s.storages, dropboxBackend)
	}

	if s.c.GoogleDriveCredentialsJSON != "" {
		googleDriveConfig := googledrive.Config{
			CredentialsJSON:    s.c.GoogleDriveCredentialsJSON,
			FolderID:           s.c.GoogleDriveFolderID,
			ImpersonateSubject: s.c.GoogleDriveImpersonateSubject,
			Endpoint:           s.c.GoogleDriveEndpoint,
			TokenURL:           s.c.GoogleDriveTokenURL,
		}
		googleDriveBackend, err := googledrive.NewStorageBackend(googleDriveConfig, logFunc)
		if err != nil {
			return errwrap.Wrap(err, "error creating googledrive storage backend")
		}
		s.storages = append(s.storages, googleDriveBackend)
	}

	if s.c.EmailNotificationRecipient != "" {
		emailURL := fmt.Sprintf(
			"smtp://%s:%s@%s:%d/?from=%s&to=%s",
@@ -267,14 +263,14 @@ func newScript() (*script, error) {

	hookLevel, ok := hookLevels[s.c.NotificationLevel]
	if !ok {
		return nil, fmt.Errorf("newScript: unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel)
		return errwrap.Wrap(nil, fmt.Sprintf("unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel))
	}
	s.hookLevel = hookLevel

	if len(s.c.NotificationURLs) > 0 {
		sender, senderErr := shoutrrr.CreateSender(s.c.NotificationURLs...)
		if senderErr != nil {
			return nil, fmt.Errorf("newScript: error creating sender: %w", senderErr)
			return errwrap.Wrap(senderErr, "error creating sender")
		}
		s.sender = sender

@@ -282,13 +278,13 @@ func newScript() (*script, error) {
		tmpl.Funcs(templateHelpers)
		tmpl, err = tmpl.Parse(defaultNotifications)
		if err != nil {
			return nil, fmt.Errorf("newScript: unable to parse default notifications templates: %w", err)
			return errwrap.Wrap(err, "unable to parse default notifications templates")
		}

		if fi, err := os.Stat("/etc/dockervolumebackup/notifications.d"); err == nil && fi.IsDir() {
			tmpl, err = tmpl.ParseGlob("/etc/dockervolumebackup/notifications.d/*.*")
			if err != nil {
				return nil, fmt.Errorf("newScript: unable to parse user defined notifications templates: %w", err)
				return errwrap.Wrap(err, "unable to parse user defined notifications templates")
			}
		}
		s.template = tmpl
@@ -309,222 +305,5 @@ func newScript() (*script, error) {
	})
}

return s, nil
}

// createArchive creates a tar archive of the configured backup location and
// saves it to disk.
func (s *script) createArchive() error {
	backupSources := s.c.BackupSources

	if s.c.BackupFromSnapshot {
		s.logger.Warn(
			"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
		)
		s.logger.Warn(
			"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the documentation for an upgrade guide.",
		)
		backupSources = filepath.Join("/tmp", s.c.BackupSources)
		// copy before compressing guard against a situation where backup folder's content are still growing.
		s.registerHook(hookLevelPlumbing, func(error) error {
			if err := remove(backupSources); err != nil {
				return fmt.Errorf("createArchive: error removing snapshot: %w", err)
			}
			s.logger.Info(
				fmt.Sprintf("Removed snapshot `%s`.", backupSources),
			)
			return nil
		})
		if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
			PreserveTimes: true,
			PreserveOwner: true,
		}); err != nil {
			return fmt.Errorf("createArchive: error creating snapshot: %w", err)
		}
		s.logger.Info(
			fmt.Sprintf("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources),
		)
	}

	tarFile := s.file
	s.registerHook(hookLevelPlumbing, func(error) error {
		if err := remove(tarFile); err != nil {
			return fmt.Errorf("createArchive: error removing tar file: %w", err)
		}
		s.logger.Info(
			fmt.Sprintf("Removed tar file `%s`.", tarFile),
		)
		return nil
	})

	backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
	if err != nil {
		return fmt.Errorf("createArchive: error getting absolute path: %w", err)
	}

	var filesEligibleForBackup []string
	if err := filepath.WalkDir(backupPath, func(path string, di fs.DirEntry, err error) error {
		if err != nil {
			return err
		}

		if s.c.BackupExcludeRegexp.Re != nil && s.c.BackupExcludeRegexp.Re.MatchString(path) {
			return nil
		}
		filesEligibleForBackup = append(filesEligibleForBackup, path)
		return nil
	}); err != nil {
		return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
	}

	if err := createArchive(filesEligibleForBackup, backupSources, tarFile, s.c.BackupCompression.String(), s.c.GzipParallelism.Int()); err != nil {
		return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
	}

	s.logger.Info(
		fmt.Sprintf("Created backup of `%s` at `%s`.", backupSources, tarFile),
	)
	return nil
}

// encryptArchive encrypts the backup file using PGP and the configured passphrase.
// In case no passphrase is given it returns early, leaving the backup file
// untouched.
func (s *script) encryptArchive() error {
	if s.c.GpgPassphrase == "" {
		return nil
	}

	gpgFile := fmt.Sprintf("%s.gpg", s.file)
	s.registerHook(hookLevelPlumbing, func(error) error {
		if err := remove(gpgFile); err != nil {
			return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
		}
		s.logger.Info(
			fmt.Sprintf("Removed GPG file `%s`.", gpgFile),
		)
		return nil
	})

	outFile, err := os.Create(gpgFile)
	if err != nil {
		return fmt.Errorf("encryptArchive: error opening out file: %w", err)
	}
	defer outFile.Close()

	_, name := path.Split(s.file)
	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
		FileName: name,
	}, nil)
	if err != nil {
		return fmt.Errorf("encryptArchive: error encrypting backup file: %w", err)
	}
	defer dst.Close()

	src, err := os.Open(s.file)
	if err != nil {
		return fmt.Errorf("encryptArchive: error opening backup file `%s`: %w", s.file, err)
	}

	if _, err := io.Copy(dst, src); err != nil {
		return fmt.Errorf("encryptArchive: error writing ciphertext to file: %w", err)
	}

	s.file = gpgFile
	s.logger.Info(
		fmt.Sprintf("Encrypted backup using given passphrase, saving as `%s`.", s.file),
	)
	return nil
}

// copyArchive makes sure the backup file is copied to both local and remote locations
// as per the given configuration.
func (s *script) copyArchive() error {
	_, name := path.Split(s.file)
	if stat, err := os.Stat(s.file); err != nil {
		return fmt.Errorf("copyArchive: unable to stat backup file: %w", err)
	} else {
		size := stat.Size()
		s.stats.BackupFile = BackupFileStats{
			Size:     uint64(size),
			Name:     name,
			FullPath: s.file,
		}
	}

	eg := errgroup.Group{}
	for _, backend := range s.storages {
		b := backend
		eg.Go(func() error {
			return b.Copy(s.file)
		})
	}
	if err := eg.Wait(); err != nil {
		return fmt.Errorf("copyArchive: error copying archive: %w", err)
	}

	return nil
}

// pruneBackups rotates away backups from local and remote storages using
// the given configuration. In case the given configuration would delete all
// backups, it does nothing instead and logs a warning.
func (s *script) pruneBackups() error {
	if s.c.BackupRetentionDays < 0 {
		return nil
	}

	deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)

	eg := errgroup.Group{}
	for _, backend := range s.storages {
		b := backend
		eg.Go(func() error {
			if skipPrune(b.Name(), s.c.BackupSkipBackendsFromPrune) {
				s.logger.Info(
					fmt.Sprintf("Skipping pruning for backend `%s`.", b.Name()),
				)
				return nil
			}
			stats, err := b.Prune(deadline, s.c.BackupPruningPrefix)
			if err != nil {
				return err
			}
			s.stats.Lock()
			s.stats.Storages[b.Name()] = StorageStats{
				Total:  stats.Total,
				Pruned: stats.Pruned,
			}
			s.stats.Unlock()
			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return fmt.Errorf("pruneBackups: error pruning backups: %w", err)
	}

	return nil
}

// must exits the script run prematurely in case the given error
// is non-nil.
func (s *script) must(err error) {
	if err != nil {
		s.logger.Error(
			fmt.Sprintf("Fatal error running backup: %s", err),
		)
		panic(err)
	}
}

// skipPrune returns true if the given backend name is contained in the
// list of skipped backends.
func skipPrune(name string, skippedBackends []string) bool {
	return slices.ContainsFunc(
		skippedBackends,
		func(b string) bool {
			return strings.EqualFold(b, name) // ignore case on both sides
		},
	)
}
@@ -1,4 +1,4 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main
@@ -1,3 +1,6 @@
// Copyright 2024 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
@@ -10,29 +13,30 @@ import (
	"time"

	"github.com/docker/cli/cli/command/service/progress"
	"github.com/docker/docker/api/types"
	ctr "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/system"
	"github.com/docker/docker/client"
	"github.com/offen/docker-volume-backup/internal/errwrap"
)

func scaleService(cli *client.Client, serviceID string, replicas uint64) ([]string, error) {
	service, _, err := cli.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
	service, _, err := cli.ServiceInspectWithRaw(context.Background(), serviceID, swarm.ServiceInspectOptions{})
	if err != nil {
		return nil, fmt.Errorf("scaleService: error inspecting service %s: %w", serviceID, err)
		return nil, errwrap.Wrap(err, fmt.Sprintf("error inspecting service %s", serviceID))
	}
	serviceMode := &service.Spec.Mode
	switch {
	case serviceMode.Replicated != nil:
		serviceMode.Replicated.Replicas = &replicas
	default:
		return nil, fmt.Errorf("scaleService: service to be scaled %s has to be in replicated mode", service.Spec.Name)
		return nil, errwrap.Wrap(nil, fmt.Sprintf("service to be scaled %s has to be in replicated mode", service.Spec.Name))
	}

	response, err := cli.ServiceUpdate(context.Background(), service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{})
	response, err := cli.ServiceUpdate(context.Background(), service.ID, service.Version, service.Spec, swarm.ServiceUpdateOptions{})
	if err != nil {
		return nil, fmt.Errorf("scaleService: error updating service: %w", err)
		return nil, errwrap.Wrap(err, "error updating service")
	}

	discardWriter := &noopWriteCloser{io.Discard}
@@ -51,21 +55,24 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i
	for {
		select {
		case <-timeout.C:
			return fmt.Errorf(
				"awaitContainerCount: timed out after waiting %s for service %s to reach desired container count of %d",
			return errwrap.Wrap(
				nil,
				fmt.Sprintf(
					"timed out after waiting %s for service %s to reach desired container count of %d",
					timeoutAfter,
					serviceID,
					count,
				),
			)
		case <-poll.C:
			containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
			containers, err := cli.ContainerList(context.Background(), ctr.ListOptions{
				Filters: filters.NewArgs(filters.KeyValuePair{
					Key:   "label",
					Value: fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID),
				}),
			})
			if err != nil {
				return fmt.Errorf("awaitContainerCount: error listing containers: %w", err)
				return errwrap.Wrap(err, "error listing containers")
			}
			if len(containers) == count {
				return nil
@@ -74,6 +81,31 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i
	}
}

func isSwarm(c interface {
	Info(context.Context) (system.Info, error)
}) (bool, error) {
	info, err := c.Info(context.Background())
	if err != nil {
		return false, errwrap.Wrap(err, "error getting docker info")
	}
	return info.Swarm.LocalNodeState != "" && info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive && info.Swarm.ControlAvailable, nil
}

func hasLabel(labels map[string]string, key, value string) bool {
	val, ok := labels[key]
	return ok && val == value
}

func checkStopLabels(labels map[string]string, stopDuringBackupLabelValue string, stopDuringBackupNoRestartLabelValue string) (bool, bool, error) {
	hasStopDuringBackupLabel := hasLabel(labels, "docker-volume-backup.stop-during-backup", stopDuringBackupLabelValue)
	hasStopDuringBackupNoRestartLabel := hasLabel(labels, "docker-volume-backup.stop-during-backup-no-restart", stopDuringBackupNoRestartLabelValue)
	if hasStopDuringBackupLabel && hasStopDuringBackupNoRestartLabel {
		return hasStopDuringBackupLabel, hasStopDuringBackupNoRestartLabel, errwrap.Wrap(nil, "both docker-volume-backup.stop-during-backup and docker-volume-backup.stop-during-backup-no-restart have been set, cannot continue")
	}

	return hasStopDuringBackupLabel, hasStopDuringBackupNoRestartLabel, nil
}

// stopContainersAndServices stops all Docker containers that are marked as to being
// stopped during the backup and returns a function that can be called to
// restart everything that has been stopped.
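Aside: isSwarm deliberately accepts the narrowest anonymous interface that satisfies it instead of a *client.Client, which is exactly what the mock in the new test file further down exploits. The general technique with illustrative types (the real method returns system.Info, not a string):

package main

import (
	"context"
	"fmt"
)

// infoer is a consumer-defined interface: any value with a matching Info
// method can stand in for the real Docker client in tests.
type infoer interface {
	Info(context.Context) (string, error)
}

type staticInfo struct{ state string }

func (s staticInfo) Info(context.Context) (string, error) { return s.state, nil }

func describe(c infoer) string {
	state, _ := c.Info(context.Background())
	return fmt.Sprintf("local node state: %s", state)
}

func main() {
	fmt.Println(describe(staticInfo{state: "active"}))
}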
@@ -82,11 +114,10 @@ func (s *script) stopContainersAndServices() (func() error, error) {
	return noop, nil
}

dockerInfo, err := s.cli.Info(context.Background())
isDockerSwarm, err := isSwarm(s.cli)
if err != nil {
	return noop, fmt.Errorf("(*script).stopContainersAndServices: error getting docker info: %w", err)
	return noop, errwrap.Wrap(err, "error determining swarm state")
}
isDockerSwarm := dockerInfo.Swarm.LocalNodeState != "inactive"

labelValue := s.c.BackupStopDuringBackupLabel
if s.c.BackupStopContainerLabel != "" {
@@ -97,53 +128,74 @@ func (s *script) stopContainersAndServices() (func() error, error) {
		"Please use BACKUP_STOP_DURING_BACKUP_LABEL instead. Refer to the docs for an upgrade guide.",
	)
	if _, ok := os.LookupEnv("BACKUP_STOP_DURING_BACKUP_LABEL"); ok {
		return noop, errors.New("(*script).stopContainersAndServices: both BACKUP_STOP_DURING_BACKUP_LABEL and BACKUP_STOP_CONTAINER_LABEL have been set, cannot continue")
		return noop, errwrap.Wrap(nil, "both BACKUP_STOP_DURING_BACKUP_LABEL and BACKUP_STOP_CONTAINER_LABEL have been set, cannot continue")
	}
	labelValue = s.c.BackupStopContainerLabel
}

filterMatchLabel := fmt.Sprintf(
stopDuringBackupLabel := fmt.Sprintf(
	"docker-volume-backup.stop-during-backup=%s",
	labelValue,
)

allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
stopDuringBackupNoRestartLabel := fmt.Sprintf(
	"docker-volume-backup.stop-during-backup-no-restart=%s",
	s.c.BackupStopDuringBackupNoRestartLabel,
)

allContainers, err := s.cli.ContainerList(context.Background(), ctr.ListOptions{})
if err != nil {
	return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers: %w", err)
	return noop, errwrap.Wrap(err, "error querying for containers")
}
containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
	Filters: filters.NewArgs(filters.KeyValuePair{
		Key:   "label",
		Value: filterMatchLabel,
	}),
})

var containersToStop []handledContainer
for _, c := range allContainers {
	hasStopDuringBackupLabel, hasStopDuringBackupNoRestartLabel, err := checkStopLabels(c.Labels, labelValue, s.c.BackupStopDuringBackupNoRestartLabel)
	if err != nil {
		return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers to stop: %w", err)
		return noop, errwrap.Wrap(err, "error querying for containers to stop")
	}

	if !hasStopDuringBackupLabel && !hasStopDuringBackupNoRestartLabel {
		continue
	}

	containersToStop = append(containersToStop, handledContainer{
		summary: c,
		restart: !hasStopDuringBackupNoRestartLabel,
	})
}

var allServices []swarm.Service
var servicesToScaleDown []handledSwarmService
if isDockerSwarm {
	allServices, err = s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
	allServices, err = s.cli.ServiceList(context.Background(), swarm.ServiceListOptions{Status: true})
	if err != nil {
		return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services: %w", err)
		return noop, errwrap.Wrap(err, "error querying for services")
	}
	matchingServices, err := s.cli.ServiceList(context.Background(), types.ServiceListOptions{
		Filters: filters.NewArgs(filters.KeyValuePair{
			Key:   "label",
			Value: filterMatchLabel,
		}),
		Status: true,
	})
	for _, s := range matchingServices {

	for _, service := range allServices {
		hasStopDuringBackupLabel, hasStopDuringBackupNoRestartLabel, err := checkStopLabels(service.Spec.Labels, labelValue, s.c.BackupStopDuringBackupNoRestartLabel)
		if err != nil {
			return noop, errwrap.Wrap(err, "error querying for services to scale down")
		}

		if !hasStopDuringBackupLabel && !hasStopDuringBackupNoRestartLabel {
			continue
		}

		if service.Spec.Mode.Replicated == nil {
			return noop, errwrap.Wrap(
				nil,
				fmt.Sprintf("only replicated services can be restarted, but found a label on service %s", service.Spec.Name),
			)
		}

		servicesToScaleDown = append(servicesToScaleDown, handledSwarmService{
			serviceID:           s.ID,
			initialReplicaCount: *s.Spec.Mode.Replicated.Replicas,
			serviceID:           service.ID,
			initialReplicaCount: *service.Spec.Mode.Replicated.Replicas,
			restart:             !hasStopDuringBackupNoRestartLabel,
		})
	}
	if err != nil {
		return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services to scale down: %w", err)
	}
}

if len(containersToStop) == 0 && len(servicesToScaleDown) == 0 {
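Aside: this hunk moves filtering from API-side label filters to client-side checks, because a container can now opt in via either of two labels. A reduced sketch of that selection logic (illustrative types, not the project's):

package main

import "fmt"

type containerSummary struct {
	name   string
	labels map[string]string
}

// selectForStop picks containers carrying either stop label and records
// whether each one should be restarted afterwards.
func selectForStop(all []containerSummary, labelValue string) (stop []containerSummary, restart []bool) {
	for _, c := range all {
		stopLabel := c.labels["docker-volume-backup.stop-during-backup"] == labelValue
		noRestartLabel := c.labels["docker-volume-backup.stop-during-backup-no-restart"] == labelValue
		if !stopLabel && !noRestartLabel {
			continue
		}
		stop = append(stop, c)
		restart = append(restart, !noRestartLabel)
	}
	return stop, restart
}

func main() {
	got, restart := selectForStop([]containerSummary{
		{name: "db", labels: map[string]string{"docker-volume-backup.stop-during-backup": "true"}},
		{name: "cache", labels: map[string]string{}},
		{name: "worker", labels: map[string]string{"docker-volume-backup.stop-during-backup-no-restart": "true"}},
	}, "true")
	for i, c := range got {
		fmt.Println(c.name, "restart:", restart[i])
	}
}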
@@ -152,17 +204,20 @@ func (s *script) stopContainersAndServices() (func() error, error) {

if isDockerSwarm {
	for _, container := range containersToStop {
		if swarmServiceID, ok := container.Labels["com.docker.swarm.service.id"]; ok {
			parentService, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, types.ServiceInspectOptions{})
		if swarmServiceID, ok := container.summary.Labels["com.docker.swarm.service.id"]; ok {
			parentService, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, swarm.ServiceInspectOptions{})
			if err != nil {
				return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for parent service with ID %s: %w", swarmServiceID, err)
				return noop, errwrap.Wrap(err, fmt.Sprintf("error querying for parent service with ID %s", swarmServiceID))
			}
			for label := range parentService.Spec.Labels {
				if label == "docker-volume-backup.stop-during-backup" {
					return noop, fmt.Errorf(
						"(*script).stopContainersAndServices: container %s is labeled to stop but has parent service %s which is also labeled, cannot continue",
						container.Names[0],
					return noop, errwrap.Wrap(
						nil,
						fmt.Sprintf(
							"container %s is labeled to stop but has parent service %s which is also labeled, cannot continue",
							container.summary.Names[0],
							parentService.Spec.Name,
						),
					)
				}
			}
@@ -172,27 +227,29 @@ func (s *script) stopContainersAndServices() (func() error, error) {

s.logger.Info(
	fmt.Sprintf(
		"Stopping %d out of %d running container(s) as they were labeled %s.",
		"Stopping %d out of %d running container(s) as they were labeled %s or %s.",
		len(containersToStop),
		len(allContainers),
		filterMatchLabel,
		stopDuringBackupLabel,
		stopDuringBackupNoRestartLabel,
	),
)
if isDockerSwarm {
	s.logger.Info(
		fmt.Sprintf(
			"Scaling down %d out of %d active service(s) as they were labeled %s.",
			"Scaling down %d out of %d active service(s) as they were labeled %s or %s.",
			len(servicesToScaleDown),
			len(allServices),
			filterMatchLabel,
			stopDuringBackupLabel,
			stopDuringBackupNoRestartLabel,
		),
	)
}

var stoppedContainers []types.Container
var stoppedContainers []handledContainer
var stopErrors []error
for _, container := range containersToStop {
	if err := s.cli.ContainerStop(context.Background(), container.ID, ctr.StopOptions{}); err != nil {
	if err := s.cli.ContainerStop(context.Background(), container.summary.ID, ctr.StopOptions{}); err != nil {
		stopErrors = append(stopErrors, err)
	} else {
		stoppedContainers = append(stoppedContainers, container)
@@ -245,18 +302,25 @@ func (s *script) stopContainersAndServices() (func() error, error) {
	var initialErr error
	allErrors := append(stopErrors, scaleDownErrors.value()...)
	if len(allErrors) != 0 {
		initialErr = fmt.Errorf(
			"(*script).stopContainersAndServices: %d error(s) stopping containers: %w",
			len(allErrors),
		initialErr = errwrap.Wrap(
			errors.Join(allErrors...),
			fmt.Sprintf(
				"%d error(s) stopping containers",
				len(allErrors),
			),
		)
	}

	return func() error {
		var restartErrors []error
		var restartedContainers []handledContainer
		matchedServices := map[string]bool{}
		for _, container := range stoppedContainers {
			if swarmServiceID, ok := container.Labels["com.docker.swarm.service.id"]; ok && isDockerSwarm {
			if !container.restart {
				continue
			}

			if swarmServiceID, ok := container.summary.Labels["com.docker.swarm.service.id"]; ok && isDockerSwarm {
				if _, ok := matchedServices[swarmServiceID]; ok {
					continue
				}
@@ -264,33 +328,40 @@ func (s *script) stopContainersAndServices() (func() error, error) {
				// in case a container was part of a swarm service, the service requires to
				// be force updated instead of restarting the container as it would otherwise
				// remain in a "completed" state
				service, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, types.ServiceInspectOptions{})
				service, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, swarm.ServiceInspectOptions{})
				if err != nil {
					restartErrors = append(
						restartErrors,
						fmt.Errorf("(*script).stopContainersAndServices: error looking up parent service: %w", err),
						errwrap.Wrap(err, "error looking up parent service"),
					)
					continue
				}
				service.Spec.TaskTemplate.ForceUpdate += 1
				if _, err := s.cli.ServiceUpdate(
					context.Background(), service.ID,
					service.Version, service.Spec, types.ServiceUpdateOptions{},
					service.Version, service.Spec, swarm.ServiceUpdateOptions{},
				); err != nil {
					restartErrors = append(restartErrors, err)
				}
				continue
			}

			if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
			if err := s.cli.ContainerStart(context.Background(), container.summary.ID, ctr.StartOptions{}); err != nil {
				restartErrors = append(restartErrors, err)
			} else {
				restartedContainers = append(restartedContainers, container)
			}
		}

		var scaleUpErrors concurrentSlice[error]
		var scaledUpServices []handledSwarmService
		if isDockerSwarm {
			wg := &sync.WaitGroup{}
			for _, svc := range servicesToScaleDown {
				if !svc.restart {
					continue
				}

				wg.Add(1)
				go func(svc handledSwarmService) {
					defer wg.Done()
@@ -299,6 +370,9 @@ func (s *script) stopContainersAndServices() (func() error, error) {
					scaleDownErrors.append(err)
					return
				}

				scaledUpServices = append(scaledUpServices, svc)

				for _, warning := range warnings {
					s.logger.Warn(
						fmt.Sprintf("The Docker API returned a warning when scaling up service %s: %s", svc.serviceID, warning),
@@ -311,23 +385,27 @@ func (s *script) stopContainersAndServices() (func() error, error) {

		allErrors := append(restartErrors, scaleUpErrors.value()...)
		if len(allErrors) != 0 {
			return fmt.Errorf(
				"(*script).stopContainersAndServices: %d error(s) restarting containers and services: %w",
				len(allErrors),
			return errwrap.Wrap(
				errors.Join(allErrors...),
				fmt.Sprintf(
					"%d error(s) restarting containers and services",
					len(allErrors),
				),
			)
		}

		s.logger.Info(
			fmt.Sprintf(
				"Restarted %d container(s).",
				"Restarted %d out of %d stopped container(s).",
				len(restartedContainers),
				len(stoppedContainers),
			),
		)
		if isDockerSwarm {
			s.logger.Info(
				fmt.Sprintf(
					"Scaled %d service(s) back up.",
					"Scaled %d out of %d scaled down service(s) back up.",
					len(scaledUpServices),
					len(scaledDownServices),
				),
			)
cmd/backup/stop_restart_test.go (new file, 98 lines)
@@ -0,0 +1,98 @@
package main

import (
	"context"
	"errors"
	"testing"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/system"
)

type mockInfoClient struct {
	result system.Info
	err    error
}

func (m *mockInfoClient) Info(context.Context) (system.Info, error) {
	return m.result, m.err
}

func TestIsSwarm(t *testing.T) {
	tests := []struct {
		name        string
		client      *mockInfoClient
		expected    bool
		expectError bool
	}{
		{
			"swarm",
			&mockInfoClient{
				result: system.Info{
					Swarm: swarm.Info{
						LocalNodeState:   swarm.LocalNodeStateActive,
						ControlAvailable: true,
					},
				},
			},
			true,
			false,
		},
		{
			"worker",
			&mockInfoClient{
				result: system.Info{
					Swarm: swarm.Info{
						LocalNodeState: swarm.LocalNodeStateActive,
					},
				},
			},
			false,
			false,
		},
		{
			"compose",
			&mockInfoClient{
				result: system.Info{
					Swarm: swarm.Info{
						LocalNodeState: swarm.LocalNodeStateInactive,
					},
				},
			},
			false,
			false,
		},
		{
			"balena",
			&mockInfoClient{
				result: system.Info{
					Swarm: swarm.Info{
						LocalNodeState: "",
					},
				},
			},
			false,
			false,
		},
		{
			"error",
			&mockInfoClient{
				err: errors.New("the dinosaurs escaped"),
			},
			false,
			true,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			result, err := isSwarm(test.client)
			if (err != nil) != test.expectError {
				t.Errorf("Unexpected error value %v", err)
			}
			if test.expected != result {
				t.Errorf("Expected %v, got %v", test.expected, result)
			}
		})
	}
}
cmd/backup/testdata/braces.env (new file, vendored, 3 lines)
@@ -0,0 +1,3 @@
FOO=${bar:-qux}
BAR=xxx
BAZ=$NOPE
cmd/backup/testdata/comments.env (new file, vendored, 7 lines)
@@ -0,0 +1,7 @@
# This is a comment about `why` things are here
# FOO="${bar:-qux}"
# e.g. `backup-$HOSTNAME-%Y-%m-%dT%H-%M-%S.tar.gz`. Expansion happens before`

BAR=xxx

BAZ=$QUX
cmd/backup/testdata/default.env (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
FOO=bar
BAZ=qux
cmd/backup/testdata/expansion.env (new file, vendored, 4 lines)
@@ -0,0 +1,4 @@
BAR=xxx
|
||||
FOO=${BAR}
|
||||
BAZ=$BAR
|
||||
QUX=${QUX}
|
||||
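These fixtures exercise POSIX-style parameter expansion when loading `.env` configuration. A minimal sketch of the semantics they appear to test, using `mvdan.cc/sh` (which the go.mod changes below add); this is an illustration, not the repository's actual loader:

```go
package main

import (
	"fmt"

	"mvdan.cc/sh/v3/shell"
)

func main() {
	env := map[string]string{"BAR": "xxx"}
	// ${BAR} and $BAR resolve from the environment, ${bar:-qux} falls back
	// to its default, and unset names like ${QUX} expand to the empty string.
	expanded, err := shell.Expand("${BAR} $BAR ${bar:-qux} ${QUX}", func(name string) string {
		return env[name]
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(expanded) // prints: xxx xxx qux
}
```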
@@ -1,4 +1,4 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main
@@ -9,6 +9,11 @@ import (
"io"
"os"
"sync"
"time"

ctr "github.com/docker/docker/api/types/container"
"github.com/offen/docker-volume-backup/internal/errwrap"
"github.com/robfig/cron/v3"
)

var noop = func() error { return nil }
@@ -20,7 +25,7 @@ func remove(location string) error {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
return errwrap.Wrap(err, fmt.Sprintf("error checking for existence of `%s`", location))
}
if fi.IsDir() {
err = os.RemoveAll(location)
@@ -28,7 +33,7 @@ func remove(location string) error {
err = os.Remove(location)
}
if err != nil {
return fmt.Errorf("remove: error removing `%s`: %w", location, err)
return errwrap.Wrap(err, fmt.Sprintf("error removing `%s`", location))
}
return nil
}
@@ -47,7 +52,7 @@ type bufferingWriter struct {

func (b *bufferingWriter) Write(p []byte) (n int, err error) {
if n, err := b.buf.Write(p); err != nil {
return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
return n, errwrap.Wrap(err, "error writing to buffer")
}
return b.writer.Write(p)
}
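Both hunks above swap `fmt.Errorf` wrapping for the internal `errwrap` helper. A plausible minimal implementation of that helper, sketched here for context since `internal/errwrap` itself is not part of this diff and may differ in detail:

```go
// Package errwrap is reconstructed here as a hypothetical sketch only.
package errwrap

import "fmt"

// Wrap annotates err with a message while keeping the original error
// reachable via errors.Is and errors.As.
func Wrap(err error, msg string) error {
	if err == nil {
		return nil
	}
	return fmt.Errorf("%s: %w", msg, err)
}
```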
@@ -60,9 +65,15 @@ func (noopWriteCloser) Close() error {
	return nil
}

type handledContainer struct {
	summary ctr.Summary
	restart bool
}

type handledSwarmService struct {
	serviceID           string
	initialReplicaCount uint64
	restart             bool
}

type concurrentSlice[T any] struct {
@@ -79,3 +90,22 @@ func (c *concurrentSlice[T]) append(v T) {
func (c *concurrentSlice[T]) value() []T {
	return c.val
}

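A hypothetical usage sketch of the generic helper, included for illustration only; the mutex-guarded internals of `concurrentSlice` fall outside this diff's context window:

```go
// collect appends to a concurrentSlice from several goroutines at once and
// then reads the accumulated values, mirroring how the backup code tracks
// restarted containers and rescaled services.
func collect(names []string) []string {
	var done concurrentSlice[string]
	var wg sync.WaitGroup
	for _, name := range names {
		wg.Add(1)
		go func(n string) {
			defer wg.Done()
			done.append(n)
		}(name)
	}
	wg.Wait()
	return done.value()
}
```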
// checkCronSchedule detects whether the given cron expression will actually
// ever be executed or not.
func checkCronSchedule(expression string) (ok bool) {
	defer func() {
		if err := recover(); err != nil {
			ok = false
		}
	}()
	sched, err := cron.ParseStandard(expression)
	if err != nil {
		ok = false
		return
	}
	now := time.Now()
	sched.Next(now) // panics when the cron would never run
	ok = true
	return
}

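Since the function signals an impossible schedule instead of crashing, a caller can validate configuration up front. A hypothetical call site, not part of this diff:

```go
// validateSchedule rejects configured expressions that parse but would
// never fire, such as the documented `0 0 5 31 2 ?` sentinel.
func validateSchedule(expression string) error {
	if !checkCronSchedule(expression) {
		return fmt.Errorf("cron expression %q would never trigger a backup", expression)
	}
	return nil
}
```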
@@ -59,7 +59,7 @@ GEM
rb-fsevent (0.11.2)
rb-inotify (0.10.1)
ffi (~> 1.0)
rexml (3.2.6)
rexml (3.4.2)
rouge (3.30.0)
safe_yaml (1.0.5)
sassc (2.4.0)
@@ -67,7 +67,7 @@ GEM
terminal-table (3.0.2)
unicode-display_width (>= 1.1.1, < 3)
unicode-display_width (2.4.2)
webrick (1.8.1)
webrick (1.8.2)

PLATFORMS
ruby

@@ -30,6 +30,6 @@ nav_external_links:
url: https://github.com/offen/docker-volume-backup

footer_content: >-
Copyright © 2021 Offen Authors and contributors.
Copyright © 2024 <a target="_blank" href="https://www.offen.software">offen.software</a> and contributors.
Distributed under the <a href="https://github.com/offen/docker-volume-backup/tree/main/LICENSE">MPL-2.0 License.</a><br>
Something missing, unclear or not working? Open <a href="https://github.com/offen/docker-volume-backup/issues">an issue</a>.

@@ -14,8 +14,6 @@ Be aware that this mechanism looks at __all files in the target bucket or archiv
In case you need to use a target that cannot be used exclusively for your backups, you can configure `BACKUP_PRUNING_PREFIX` to limit which files are considered eligible for deletion:

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:

@@ -3,15 +3,7 @@ title: Encrypt backups using GPG
layout: default
parent: How Tos
nav_order: 7
nav_exclude: true
---

# Encrypt backups using GPG

The image supports encrypting backups using GPG out of the box.
In case a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.gpg` file instead.

Assuming you have `gpg` installed, you can decrypt such a backup using (your OS will prompt for the passphrase before decryption can happen):

```console
gpg -o backup.tar.gz -d backup.tar.gz.gpg
```
See: [Encrypt Backups](encrypt-backups)

30  docs/how-tos/encrypt-backups.md  Normal file
@@ -0,0 +1,30 @@
---
title: Encrypting backups
layout: default
parent: How Tos
nav_order: 7
---

# Encrypting backups

The image supports encrypting backups using one of two available methods: **GPG** or **[age](https://age-encryption.org/)**.

## Using GPG encryption

In case a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY_RING` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.gpg` file instead.

Assuming you have `gpg` installed, you can decrypt such a backup using (your OS will prompt for the passphrase before decryption can happen):

```console
gpg -o backup.tar.gz -d backup.tar.gz.gpg
```

## Using age encryption

age allows backups to be encrypted with either a symmetric key (a passphrase) or a public key. Only one of these options can be used at a time.

If `AGE_PASSPHRASE` is provided, the backup archive will be encrypted with the passphrase and saved as a `.age` file instead. Refer to the age documentation for how to decrypt such an archive.

If `AGE_PUBLIC_KEYS` is provided (multiple keys can be given by separating them with `,`), the backup archive will be encrypted with the provided public keys and likewise saved as a `.age` file.

You can use SSH keys in addition to `age` keys for encryption; `AGE_PUBLIC_KEYS` accepts both.
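Assuming you have the `age` CLI installed, decryption works along these lines (the identity file name is illustrative):

```console
# passphrase-encrypted archive: age prompts for the passphrase
age -d -o backup.tar.gz backup.tar.gz.age

# public-key-encrypted archive: pass the matching identity (private key) file
age -d -i key.txt -o backup.tar.gz backup.tar.gz.age
```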
@@ -20,8 +20,6 @@ RUN apk add rsync

Using this image, you can now omit configuring any of the supported storage backends, and instead define your own mechanism in a `docker-volume-backup.copy-post` label:

```yml
version: '3'

services:
backup:
image: your-custom-image
@@ -33,7 +31,7 @@ services:
- docker-volume-backup.copy-post=/bin/sh -c 'rsync $$COMMAND_RUNTIME_ARCHIVE_FILEPATH /destination'
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- /var/run/docker.sock:/var/run/docker.sock:ro

# other services defined here ...
volumes:

@@ -2,7 +2,7 @@
title: Replace deprecated BACKUP_FROM_SNAPSHOT usage
layout: default
parent: How Tos
nav_order: 16
nav_order: 17
---

# Replace deprecated `BACKUP_FROM_SNAPSHOT` usage
@@ -11,8 +11,6 @@ Starting with version 2.15.0, the `BACKUP_FROM_SNAPSHOT` feature has been deprec
If you need to prepare your sources before the backup is taken, use `archive-pre`, `archive-post` and an intermediate volume:

```yml
version: '3'

services:
my_app:
build: .

@@ -2,7 +2,7 @@
title: Replace deprecated BACKUP_STOP_CONTAINER_LABEL setting
layout: default
parent: How Tos
nav_order: 19
nav_order: 20
---

# Replace deprecated `BACKUP_STOP_CONTAINER_LABEL` setting

@@ -2,7 +2,7 @@
title: Replace deprecated exec-pre and exec-post labels
layout: default
parent: How Tos
nav_order: 17
nav_order: 18
---

# Replace deprecated `exec-pre` and `exec-post` labels

@@ -9,6 +9,11 @@ parent: How Tos

In certain scenarios it can be required to run specific commands before and after a backup is taken (e.g. dumping a database).
When mounting the Docker socket into the `docker-volume-backup` container, you can define pre- and post-commands that will be run in the context of the target container (it is also possible to run commands inside the `docker-volume-backup` container itself using this feature).

{: .important }
In a multi-node Swarm setup, commands can currently only be run on the node the `offen/docker-volume-backup` container is running on.
Labeled containers on other nodes are not visible to the backup command.

Such commands are defined by specifying the command in a `docker-volume-backup.[step]-[pre|post]` label where `step` can be any of the following phases of a backup lifecycle:

- `archive` (the tar archive is created)
@@ -23,8 +28,6 @@ the `docker-volume-backup` container as shown in the Quickstart example.
Taking a database dump using `mysqldump` would look like this:

```yml
version: '3'

services:
# ... define other services using the `data` volume here
database:
@@ -46,9 +49,11 @@ If you have more than one `docker-volume-backup` container (possibly across seve
multiple backup schedules, you will need to use `EXEC_LABEL` in the configuration and a `docker-volume-backup.exec-label` label on each
container using custom commands to ensure that the commands are only run by the correct `docker-volume-backup` instance.

```yml
version: '3'
{: .important }
In case you use `EXEC_LABEL` together with configuration mounted from `conf.d` it's important to understand that a distinct `EXEC_LABEL` __should be set in each configuration__.
Otherwise, schedules that do not specify an `EXEC_LABEL` will still trigger commands on all containers with such labels, no matter whether they specify `docker-volume-backup.exec-label` or not.

```yml
services:
database:
image: mariadb
@@ -78,8 +83,6 @@ By default the backup command is executed by the user provided by the container'
It is possible to specify a custom user that is used to run commands in dedicated labels with the format `docker-volume-backup.[step]-[pre|post].user`:

```yml
version: '3'

services:
gitea:
image: gitea/gitea

@@ -10,8 +10,6 @@ nav_order: 11
Multiple backup schedules with different configuration can be configured by mounting an arbitrary number of configuration files (using the `.env` format) into `/etc/dockervolumebackup/conf.d`:

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:

@@ -12,8 +12,6 @@ As the image is designed to be as small as possible, additional timezone data is
In case you want to run your cron rules in your local timezone (respecting DST and similar), you can mount your Docker host's `/etc/timezone` and `/etc/localtime` in read-only mode:

```yml
version: '3'

services:
backup:
image: offen/docker-volume-backup:v2

@@ -33,5 +33,7 @@ Note: Using the "Generated access token" in the app console is not supported, as

## Other parameters

Important: If you chose `App folder` access during the creation of your Dropbox app in step 1 above, you can only write in the app's directory!
This means, that `DROPBOX_REMOTE_PATH` must start with e.g. `/Apps/YOUR_APP_NAME` or `/Apps/YOUR_APP_NAME/some_sub_dir`
Important: If you chose `App folder` access during the creation of your Dropbox app in step 1 above, `DROPBOX_REMOTE_PATH` will be a relative path under the App folder!
(_For example, DROPBOX_REMOTE_PATH=/somedir means the backup file will be uploaded to /Apps/myapp/somedir_)
On the other hand if you chose `Full Dropbox` access, the value for `DROPBOX_REMOTE_PATH` will represent an absolute path inside your Dropbox storage area.
(_Still considering the same example above, the backup file will be uploaded to /somedir in your Dropbox root_)

@@ -12,8 +12,6 @@ parent: How Tos
To send out email notifications on failed backup runs, provide SMTP credentials, a sender and a recipient:

```yml
version: '3'

services:
backup:
image: offen/docker-volume-backup:v2
@@ -25,7 +23,7 @@ services:
Notification backends other than email are also supported.
Refer to the documentation of [shoutrrr][shoutrrr-docs] to find out about options and configuration.

[shoutrrr-docs]: https://containrrr.dev/shoutrrr/0.7/services/overview/
[shoutrrr-docs]: https://shoutrrr.nickfedor.com/v0.10.3/services/overview/

{: .note }
If you also want notifications on successful executions, set `NOTIFICATION_LEVEL` to `info`.
@@ -48,7 +46,7 @@ The files have to define [nested templates](https://pkg.go.dev/text/template#hdr
{% raw %}
```
{{ define "title_success" -}}
✅ Successfully ran backup {{ .Config.BackupStopContainerLabel }}
✅ Successfully ran backup {{ .Config.BackupStopDuringBackupLabel }}
{{- end }}

{{ define "body_success" -}}
@@ -122,11 +120,11 @@ If such a URL contains special characters (e.g. commas) these need to be URL enc
To obtain an encoded version of your URL, you can use the CLI tool provided by `shoutrrr` (which is the library used for sending notifications):

```
docker run --rm -ti containrrr/shoutrrr generate [service]
docker run --rm -ti ghcr.io/nicholas-fedor/shoutrrr generate [service]
```

where service is any of the [supported services][shoutrrr-docs], e.g. for SMTP:

```
docker run --rm -ti containrrr/shoutrrr generate smtp
docker run --rm -ti ghcr.io/nicholas-fedor/shoutrrr generate smtp
```

@@ -17,8 +17,6 @@ By default, any container that is labeled `docker-volume-backup.stop-during-back
In case you need more fine grained control about which containers should be stopped (e.g. when backing up multiple volumes on different schedules), you can set the `BACKUP_STOP_DURING_BACKUP_LABEL` environment variable and then use the same value for labeling:

```yml
version: '3'

services:
app:
# definition for app ...
@@ -36,3 +34,29 @@ services:
volumes:
data:
```

## Stop containers during backup without restarting

Sometimes you might want to stop containers for the backup but not have them start again automatically, for example if they are normally started by an external process or scheduler.

For this use case, you can use the label `docker-volume-backup.stop-during-backup-no-restart`.
This label is **mutually exclusive** with `docker-volume-backup.stop-during-backup` and performs the same stop operation but skips restarting the container after the backup has finished.

```yml
services:
  app:
    # definition for app ...
    labels:
      - docker-volume-backup.stop-during-backup-no-restart=service2

  backup:
    image: offen/docker-volume-backup:v2
    environment:
      BACKUP_STOP_DURING_BACKUP_NO_RESTART_LABEL: service2
    volumes:
      - data:/backup/my-app-backup:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro

volumes:
  data:
```

@@ -2,7 +2,7 @@
title: Update deprecated email configuration
layout: default
parent: How Tos
nav_order: 18
nav_order: 19
---

# Update deprecated email configuration

36  docs/how-tos/use-as-non-root.md  Normal file
@@ -0,0 +1,36 @@
---
title: Use the image as a non-root user
layout: default
parent: How Tos
nav_order: 16
---

# Use the image as a non-root user

{: .important }
Running as a non-root user limits interaction with the Docker Daemon.
If you want to stop and restart containers and services during backup, and the host's Docker daemon is running as root, you will also need to run this tool as root.

By default, this image executes backups using the `root` user.
In case you prefer to use a different user, you can use Docker's [`user`](https://docs.docker.com/engine/reference/run/#user) option, passing the user and group id:

```console
docker run --rm \
  -v data:/backup/data \
  --env AWS_ACCESS_KEY_ID="<xxx>" \
  --env AWS_SECRET_ACCESS_KEY="<xxx>" \
  --env AWS_S3_BUCKET_NAME="<xxx>" \
  --entrypoint backup \
  --user 1000:1000 \
  offen/docker-volume-backup:v2
```

or in a compose file:

```yml
services:
  backup:
    image: offen/docker-volume-backup:v2
    user: 1000:1000
    # further configuration omitted ...
```
@@ -8,7 +8,8 @@ nav_order: 13
# Use with Docker Swarm

{: .note }
The mechanisms described in this page __do only apply when Docker is running in [Swarm mode][swarm]__.
The mechanisms described on this page __only apply when Docker is running in [Swarm mode][swarm]__ and __when the `docker-volume-backup` container is placed on a manager node__.
Containers that are placed on worker nodes function as if the Docker engine was not running in Swarm mode, i.e. there is no access to services and no way to interact with resources running on different host nodes.

[swarm]: https://docs.docker.com/engine/swarm/

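In a stack file, pinning the backup service to a manager node can be expressed with a placement constraint. A hedged sketch using standard Swarm deploy syntax; service and image tags are illustrative:

```yml
services:
  backup:
    image: offen/docker-volume-backup:v2
    deploy:
      placement:
        constraints:
          - node.role == manager
```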
@@ -7,13 +7,13 @@ nav_order: 1
# offen/docker-volume-backup
{:.no_toc}

Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage, Dropbox or SSH compatible storage.
Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage, Dropbox, Google Drive or SSH compatible storage.
{: .fs-6 .fw-300 }

---

The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) companion container to an existing Docker setup.
It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV, Azure Blob Storage, Dropbox or SSH compatible storage (or any combination thereof) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for (failed) backup runs__.
It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV, Azure Blob Storage, Dropbox, Google Drive or SSH compatible storage (or any combination thereof) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for (failed) backup runs__.

{: .note }
Code and documentation for `v1` versions are found on [this branch][v1-branch].
@@ -32,8 +32,6 @@ Code and documentation for `v1` versions are found on [this branch][v1-branch].
Add a `backup` service to your compose setup and mount the volumes you would like to see backed up:

```yml
version: '3'

services:
volume-consumer:
build:
@@ -88,7 +86,7 @@ docker run --rm \

Alternatively, pass a `--env-file` in order to use a full config as described below.

### Available image registries
## Available image registries

This Docker image is published to both Docker Hub and the GitHub container registry.
Depending on your preferences and needs, you can reference both `offen/docker-volume-backup` as well as `ghcr.io/offen/docker-volume-backup`:
@@ -100,12 +98,17 @@ docker pull ghcr.io/offen/docker-volume-backup:v2

Documentation references Docker Hub, but all examples will work using ghcr.io just as well.

## Supported Engines

This tool is developed and tested against the Docker CE engine exclusively.
While it may work against different implementations (e.g. Balena Engine), there are no guarantees about support for non-Docker engines.

## Differences to `jareware/docker-volume-backup`

This image is heavily inspired by `jareware/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:

- The original image is based on `ubuntu` and requires additional tools, making it heavy.
This version is roughly 1/25 in compressed size (it's ~15MB).
This version is roughly 1/20 in compressed size (it's ~25MB).
- The original image uses a shell script, whereas this version is written in Go.
- The original image proposed to handle backup rotation through AWS S3 lifecycle policies.
This image adds the option to rotate away old backups through the same command so this functionality can also be offered for non-AWS storage backends like MinIO.

@@ -15,8 +15,6 @@ This doc lists configuration for some real-world use cases that you can copy and
## Backing up to AWS S3

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -36,8 +34,6 @@ volumes:
## Backing up to Filebase

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -58,8 +54,6 @@ volumes:
## Backing up to MinIO

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -81,8 +75,6 @@ volumes:
## Backing up to MinIO (using Docker secrets)

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -112,8 +104,6 @@ secrets:
## Backing up to WebDAV

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -134,8 +124,6 @@ volumes:
## Backing up to SSH

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -157,8 +145,6 @@ volumes:
## Backing up to Azure Blob Storage

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -180,8 +166,6 @@ volumes:
See [Dropbox Setup](../how-tos/set-up-dropbox.md) on how to get the appropriate environment values.

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -190,7 +174,7 @@ services:
DROPBOX_REFRESH_TOKEN: REFRESH_KEY # replace
DROPBOX_APP_KEY: APP_KEY # replace
DROPBOX_APP_SECRET: APP_SECRET # replace
DROPBOX_REMOTE_PATH: /Apps/my-test-app/some_subdir # replace
DROPBOX_REMOTE_PATH: /somedir # replace
volumes:
- data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
@@ -202,8 +186,6 @@ volumes:
## Backing up locally

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -223,8 +205,6 @@ volumes:
## Backing up to AWS S3 as well as locally

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -245,8 +225,6 @@ volumes:
## Running on a custom cron schedule

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -268,8 +246,6 @@ volumes:
## Rotating away backups that are older than 7 days

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -289,11 +265,9 @@ volumes:
data:
```

## Encrypting your backups using GPG
## Encrypting your backups symmetrically using GPG

```yml
version: '3'

services:
# ... define other services using the `data` volume here
backup:
@@ -311,16 +285,39 @@ volumes:
data:
```

## Using mysqldump to prepare the backup
## Encrypting your backups asymmetrically using GPG

```yml
version: '3'
services:
# ... define other services using the `data` volume here
backup:
image: offen/docker-volume-backup:v2
environment:
AWS_S3_BUCKET_NAME: backup-bucket
AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
GPG_PUBLIC_KEY_RING: |
-----BEGIN PGP PUBLIC KEY BLOCK-----

D/cIHu6GH/0ghlcUVSbgMg5RRI5QKNNKh04uLAPxr75mKwUg0xPUaWgyyrAChVBi
...
-----END PGP PUBLIC KEY BLOCK-----
volumes:
- data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro

volumes:
data:
```

## Using mariadb-dump/mysqldump to prepare the backup

```yml
services:
database:
image: mariadb:latest
labels:
- docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump -psecret --all-databases > /tmp/dumps/dump.sql'
- docker-volume-backup.archive-pre=/bin/sh -c 'mariadb-dump -psecret --all-databases > /tmp/dumps/dump.sql'
volumes:
- data:/tmp/dumps
backup:
@@ -331,7 +328,7 @@ services:
volumes:
- ./local:/archive
- data:/backup/data:ro
- /var/run/docker.sock:/var/run/docker.sock
- /var/run/docker.sock:/var/run/docker.sock:ro

volumes:
data:
@@ -340,8 +337,6 @@ volumes:
## Running multiple instances in the same setup

```yml
version: '3'

services:
# ... define other services using the `data_1` and `data_2` volumes here
backup_1: &backup_service
@@ -371,3 +366,22 @@ volumes:
data_1:
data_2:
```

## Running as a non-root user

```yml
services:
# ... define other services using the `data` volume here
backup:
image: offen/docker-volume-backup:v2
user: 1000:1000
environment:
AWS_S3_BUCKET_NAME: backup-bucket
AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
volumes:
- data:/backup/my-app-backup:ro

volumes:
data:
```

@@ -9,7 +9,7 @@ nav_order: 2
Backup targets, schedule and retention are configured using environment variables.

{: .note }
You can use any environment variable from below also with a `_FILE` suffix to be able to load the value from a file.
As per established convention, you can use any environment variable key from below with a `_FILE` suffix in order to load the value from a file instead.
This is typically useful when using [Docker Secrets](https://docs.docker.com/engine/swarm/secrets/) or similar.
Note that secrets will not be trimmed of leading or trailing whitespace.

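In practice the `_FILE` convention pairs with a mounted secret. A minimal sketch, assuming an S3 configuration and standard compose secrets (the secret names are illustrative):

```yml
services:
  backup:
    image: offen/docker-volume-backup:v2
    environment:
      # read the value from the mounted secret file instead of a literal
      AWS_SECRET_ACCESS_KEY_FILE: /run/secrets/aws_secret_access_key
    secrets:
      - aws_secret_access_key

secrets:
  aws_secret_access_key:
    file: ./aws_secret_access_key.txt
```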
@@ -17,42 +17,77 @@ Note that secrets will not be trimmed of leading or trailing whitespace.
In case you encounter double quoted values in your runtime configuration you might still be using an [older version of `docker-compose`][compose-issue].
You can work around this by either updating `docker-compose` or unquoting your configuration values.

You can populate below template according to your requirements and use it as your `env_file`:
You can populate the template below according to your requirements and use it as your `env_file`.
The value given for each key matches its default.

{% raw %}
```
########### BACKUP SCHEDULE

# Backups run on the given cron schedule in `busybox` flavor. If no
# value is set, `@daily` will be used. If you do not want the cron
# to ever run, use `0 0 5 31 2 ?`.
# Backups can be run on a fixed schedule that is defined as a cron expression.
# A cron expression represents a set of times, using 5 or 6 space-separated fields.
#
# Field name   | Mandatory? | Allowed values  | Allowed special characters
# ----------   | ---------- | --------------  | --------------------------
# Seconds      | No         | 0-59            | * / , -
# Minutes      | Yes        | 0-59            | * / , -
# Hours        | Yes        | 0-23            | * / , -
# Day of month | Yes        | 1-31            | * / , - ?
# Month        | Yes        | 1-12 or JAN-DEC | * / , -
# Day of week  | Yes        | 0-6 or SUN-SAT  | * / , - ?
#
# Month and Day-of-week field values are case insensitive.
# "SUN", "Sun", and "sun" are equally accepted.
# If you do not want the cron to ever run, use `0 0 5 31 2 ?`.
# Refer to sites like <https://crontab.guru> for help.
# If no value is set, `@daily` will be used, which runs every
# day at midnight.

# BACKUP_CRON_EXPRESSION="0 2 * * *"
# BACKUP_CRON_EXPRESSION="@daily"

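# As an added illustration (not part of the original template): the optional
# seconds field makes 6-field expressions possible, e.g. a run every
# 30 seconds would be expressed as:
# BACKUP_CRON_EXPRESSION="*/30 * * * * *"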
# ---

# Optional startup delay ("jitter") applied before each backup run.
# The jitter introduces a random delay between 0 and the given duration.
#
# Set to "0s" or omit the variable to disable jitter completely.
# Default = "0s". In case you need to adjust this value, supply a duration
# value as per https://pkg.go.dev/time#ParseDuration to `BACKUP_JITTER`.
#
# BACKUP_JITTER="0s"

# ---

# The compression algorithm used in conjunction with tar.
# Valid options are: "gz" (Gzip) and "zst" (Zstd).
# Note that the selection affects the file extension.
# Valid options are: "gz" (Gzip), "zst" (Zstd) or "none" (tar only).
# Default is "gz". Note that the selection affects the file extension.

# BACKUP_COMPRESSION="gz"

# ---

# Parallelism level for "gz" (Gzip) compression.
# Defines how many blocks of data are concurrently processed.
# Higher values result in faster compression. No effect on decompression.
# Default = 1. Setting this to 0 will use all available threads.

# GZIP_PARALLELISM=1
# GZIP_PARALLELISM="1"

# The name of the backup file including the extension.
# Format verbs will be replaced as in `strftime`. Omitting them
# ---

# The desired name of the backup file including the extension.
# Format verbs will be replaced as in `strftime`. Omitting all verbs
# will result in the same filename for every backup run, which means previous
# versions will be overwritten on subsequent runs.
# Extension can be defined literally or via "{{ .Extension }}" template,
# in which case it will become either "tar.gz" or "tar.zst" (depending
# in which case it will become either "tar.gz", "tar.zst" or ".tar" (depending
# on your BACKUP_COMPRESSION setting).
# The default results in filenames like: `backup-2021-08-29T04-00-00.tar.gz`.

# BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.{{ .Extension }}"

# ---

# Setting BACKUP_FILENAME_EXPAND to true allows for environment variable
# placeholders in BACKUP_FILENAME, BACKUP_LATEST_SYMLINK and in
# BACKUP_PRUNING_PREFIX that will get expanded at runtime,
@@ -63,10 +98,15 @@ You can populate below template according to your requirements and use it as you

# BACKUP_FILENAME_EXPAND="true"

# ---

# When storing local backups, a symlink to the latest backup can be created
# in case a value is given for this key. This has no effect on remote backups.
# Example: "backup.latest.tar.gz"

# BACKUP_LATEST_SYMLINK="backup.latest.tar.gz"
# BACKUP_LATEST_SYMLINK=""

# ---

# ************************************************************************
# The BACKUP_FROM_SNAPSHOT option has been deprecated and will be removed
@@ -80,83 +120,112 @@ You can populate below template according to your requirements and use it as you

# BACKUP_FROM_SNAPSHOT="false"

# By default, the `/backup` directory inside the container will be backed up.
# In case you need to use a custom location, set `BACKUP_SOURCES`.
# ---

# BACKUP_SOURCES="/other/location"
# By default, the contents of the `/backup` directory inside the container
# will be backed up. In case you need to use a custom location, set `BACKUP_SOURCES`.
# Example: "/other/location"

# When given, all files in BACKUP_SOURCES whose full path matches the given
# BACKUP_SOURCES="/backup"

# ---

# When a value is given, all files in BACKUP_SOURCES whose full path matches the
# regular expression will be excluded from the archive. Regular Expressions
# can be used as from the Go standard library https://pkg.go.dev/regexp
# Example: "\.log$"

# BACKUP_EXCLUDE_REGEXP="\.log$"
# BACKUP_EXCLUDE_REGEXP=""

# ---

# Exclude one or many storage backends from the pruning process.
# Available backends are: S3, WebDAV, SSH, Local, Dropbox, Azure
# E.g. with one backend excluded: BACKUP_SKIP_BACKENDS_FROM_PRUNE=s3
# E.g. with multiple backends excluded: BACKUP_SKIP_BACKENDS_FROM_PRUNE=s3,webdav
# Available backends are: S3, WebDAV, SSH, Local, Dropbox, Azure
# Note: The name of the backends is case insensitive.
# Note: The names of the backends are case insensitive.
# Default: All backends get pruned.

# BACKUP_SKIP_BACKENDS_FROM_PRUNE=
# BACKUP_SKIP_BACKENDS_FROM_PRUNE=""

########### BACKUP STORAGE
########### S3 COMPATIBLE STORAGE

# The name of the remote bucket that should be used for storing backups. If
# this is not set, no remote backups will be stored.
# Example: "backup-bucket"

# AWS_S3_BUCKET_NAME="backup-bucket"
# AWS_S3_BUCKET_NAME=""

# ---

# If you want to store the backup in a non-root location on your bucket
# you can provide a path. The path must not contain a leading slash.
# Example: "my/backup/location"

# AWS_S3_PATH="my/backup/location"
# AWS_S3_PATH=""

# ---

# Define credentials for authenticating against the backup storage and a bucket
# name. Although all of these keys are `AWS`-prefixed, the setup can be used
# with any S3 compatible storage.

# AWS_ACCESS_KEY_ID="<xxx>"
# AWS_SECRET_ACCESS_KEY="<xxx>"
# AWS_ACCESS_KEY_ID=""
# AWS_SECRET_ACCESS_KEY=""

# ---

# Instead of providing static credentials, you can also use IAM instance profiles
# or similar to provide authentication. Some possible configuration options on AWS:
# - EC2: http://169.254.169.254
# - ECS: http://169.254.170.2

# AWS_IAM_ROLE_ENDPOINT="http://169.254.169.254"
# AWS_IAM_ROLE_ENDPOINT=""

# ---

# This is the FQDN of your storage server, e.g. `storage.example.com`.
# Do not set this when working against AWS S3 (the default value is
# `s3.amazonaws.com`). If you need to set a specific (non-https) protocol, you
# will need to use the option below.
# If you need to set a specific (non-https) protocol, you will need to use the option below.
# The default value points to the standard AWS S3 endpoint.

# AWS_ENDPOINT="storage.example.com"
# AWS_ENDPOINT="s3.amazonaws.com"

# The protocol to be used when communicating with your storage server.
# ---

# The protocol to be used when communicating with your S3 storage server.
# Defaults to "https". You can set this to "http" when communicating with
# a different Docker container on the same host for example.
# a different Docker container in the same virtual network for example.

# AWS_ENDPOINT_PROTO="https"

# ---

# Setting this variable to `true` will disable verification of
# SSL certificates for AWS_ENDPOINT. You shouldn't use this unless you use
# self-signed certificates for your remote storage backend. This can only be
# used when AWS_ENDPOINT_PROTO is set to `https`.

# AWS_ENDPOINT_INSECURE="true"
# AWS_ENDPOINT_INSECURE="false"

# ---

# If you wish to use self signed certificates for your S3 server, you can pass
# the location of a PEM encoded CA certificate and it will be used for
# validating your certificates.
# Alternatively, pass a PEM encoded string containing the certificate.
# validating your certificates. Alternatively, pass a PEM encoded string
# containing the certificate.
# Example: "/path/to/cert.pem"

# AWS_ENDPOINT_CA_CERT="/path/to/cert.pem"
# AWS_ENDPOINT_CA_CERT=""

# Setting this variable will change the S3 storage class header.
# Defaults to "STANDARD", you can set this value according to your needs.
# ---

# AWS_STORAGE_CLASS="GLACIER"
# Setting a value for this key will change the S3 storage class header.
# Default behavior is to use the standard class when no value is given.
# Example: "GLACIER"

# AWS_STORAGE_CLASS=""

# ---

# Setting this variable will change the S3 default part size for the copy step.
# This value is useful when you want to upload large files.
@@ -165,106 +234,223 @@ You can populate below template according to your requirements and use it as you
# Defaults to "16" (MB) if unset (minio's default); set this value according to your needs.
# The value is an integer and is interpreted as MB.

# AWS_PART_SIZE=16
# AWS_PART_SIZE="16"

# You can also backup files to any WebDAV server:
########### WEBDAV STORAGE

# The URL of the remote WebDAV server
# Example: "https://webdav.example.com"

# WEBDAV_URL="https://webdav.example.com"
# WEBDAV_URL=""

# ---

# The Directory to place the backups to on the WebDAV server.
# If the path is not present on the server it will be created.
# Example: "/my/directory/"

# WEBDAV_PATH="/my/directory/"
# WEBDAV_PATH=""

# ---

# The username for the WebDAV server
# Example: "user"

# WEBDAV_USERNAME="user"
# WEBDAV_USERNAME=""

# ---

# The password for the WebDAV server
# Example: "password"

# WEBDAV_PASSWORD="password"
# WEBDAV_PASSWORD=""

# Setting this variable to `true` will disable verification of
# ---

# Setting this variable to "true" will disable verification of
# SSL certificates for WEBDAV_URL. You shouldn't use this unless you use
# self-signed certificates for your remote storage backend.

# WEBDAV_URL_INSECURE="true"
# WEBDAV_URL_INSECURE="false"

# You can also backup files to any SSH server:
########### SSH/SFTP STORAGE

# The URL of the remote SSH server
# The FQDN of the remote SSH server
# Example: "server.local"

# SSH_HOST_NAME="server.local"
# SSH_HOST_NAME=""

# ---

# The port of the remote SSH server
# Optional variable default value is `22`

# SSH_PORT=2222
# SSH_PORT="22"

# ---

# The Directory to place the backups to on the SSH server.
# If the directory does not exist, it will be created automatically.
# Example: "/home/user/backups"

# SSH_REMOTE_PATH="/my/directory/"
# SSH_REMOTE_PATH=""

# ---

# The username for the SSH server
# Example: "user"

# SSH_USER="user"
# SSH_USER=""

# ---

# The password for the SSH server
# Example: "password"

# SSH_PASSWORD="password"
# SSH_PASSWORD=""

# The private key path in container for SSH server
# Default value: /root/.ssh/id_rsa
# If file is mounted to /root/.ssh/id_rsa path it will be used. Non-RSA keys will
# also work.
# ---

# The private key path in container for SSH server.
# Consumers can mount a file into /root/.ssh/id_rsa (or the respective value)
# in order to have it being used. Non-RSA keys (e.g. ed25519) will also work.

# SSH_IDENTITY_FILE="/root/.ssh/id_rsa"

# The passphrase for the identity file
# ---

# SSH_IDENTITY_PASSPHRASE="pass"
# The passphrase for the identity file if applicable
# Example: "pass"

# SSH_IDENTITY_PASSPHRASE=""

########### AZURE BLOB STORAGE

# The credential's account name when using Azure Blob Storage. This has to be
# set when using Azure Blob Storage.
# Example: "account-name"

# AZURE_STORAGE_ACCOUNT_NAME="account-name"
# AZURE_STORAGE_ACCOUNT_NAME=""

# ---

# The credential's primary account key when using Azure Blob Storage. If this
# is not given, the command tries to fall back to using a managed identity.
# is not given, the command tries to fall back to using a connection string
# (if given) or a managed identity (if neither is set).

# AZURE_STORAGE_PRIMARY_ACCOUNT_KEY="<xxx>"
# AZURE_STORAGE_PRIMARY_ACCOUNT_KEY=""

# ---

# A connection string for accessing Azure Blob Storage. If this
# is not given, the command tries to fall back to using a primary account key
# (if given) or a managed identity (if neither is set).

# AZURE_STORAGE_CONNECTION_STRING=""

# ---

# The container name when using Azure Blob Storage.
# Example: "container-name"

# AZURE_STORAGE_CONTAINER_NAME="container-name"
# AZURE_STORAGE_CONTAINER_NAME=""

# ---

# The service endpoint when using Azure Blob Storage. This is a template that
# can be passed the account name as shown in the default value below.

# AZURE_STORAGE_ENDPOINT="https://{{ .AccountName }}.blob.core.windows.net/"

# ---

# The access tier when using Azure Blob Storage. Possible values are
# https://github.com/Azure/azure-sdk-for-go/blob/sdk/storage/azblob/v1.3.2/sdk/storage/azblob/internal/generated/zz_constants.go#L14-L30
# Example: "Cold"

# AZURE_STORAGE_ACCESS_TIER=""

########### DROPBOX STORAGE

# Absolute remote path in your Dropbox where the backups shall be stored.
# Note: Use your app's subpath in Dropbox, if it doesn't have global access.
# Consulte the README for further information.
# Consult the README for further information.
# Example: "/my/directory"

# DROPBOX_REMOTE_PATH="/my/directory"
# DROPBOX_REMOTE_PATH=""

# ---

# App key and app secret from your app created at https://www.dropbox.com/developers/apps

# DROPBOX_APP_KEY=""
# DROPBOX_APP_SECRET=""

# ---

# Number of concurrent chunked uploads for Dropbox.
# Values above 6 usually result in no enhancements.

# DROPBOX_CONCURRENCY_LEVEL="6"

# App key and app secret from your app created at https://www.dropbox.com/developers/apps/info

# DROPBOX_APP_KEY=""
# DROPBOX_APP_SECRET=""
# ---

# Refresh token to request new short-lived tokens (OAuth2). Consult README to see how to get one.

# DROPBOX_REFRESH_TOKEN=""

########### GOOGLE DRIVE STORAGE

# The JSON credentials for a Google service account with access to Google Drive.
# You can provide either:
# 1. The actual JSON content directly
# 2. Use the _FILE suffix to load from a file (e.g., GOOGLE_DRIVE_CREDENTIALS_JSON_FILE)
#
# Examples:
# Option 1 - JSON content:
# docker run [...] \
# -e GOOGLE_DRIVE_CREDENTIALS_JSON='{"type":"service_account",...}'
#
# Option 2 - Using _FILE suffix (recommended for Docker Secrets):
# docker run [...] \
# -v ./credentials.json:/creds/google-credentials.json \
# -e GOOGLE_DRIVE_CREDENTIALS_JSON_FILE=/creds/google-credentials.json
#
# GOOGLE_DRIVE_CREDENTIALS_JSON=""

# ---

# The ID of the Google Drive folder where backups will be uploaded.
# You can find the folder ID in the URL when viewing the folder in Google Drive.
#
# Example: "1A2B3C4D5E6F7G8H9I0J"
#
# GOOGLE_DRIVE_FOLDER_ID=""

# ---

# The email address of the user to impersonate when accessing Google Drive (domain-wide delegation).
# This is required because your service account needs to act on behalf of a user in your organization in order to upload files.
# How to: https://support.google.com/a/answer/162106
# Example: "user@example.com"
#
# GOOGLE_DRIVE_IMPERSONATE_SUBJECT=""

# ---

# (Optional) Custom Google Drive API endpoint. This is primarily for testing with a mock server.
# Example: "http://localhost:8080/drive/v3"
#
# GOOGLE_DRIVE_ENDPOINT=""

# ---

# (Optional) Custom token URL for Google Drive authentication. This is primarily for testing with a mock server.
# Example: "http://localhost:8080/token"
#
# GOOGLE_DRIVE_TOKEN_URL=""

########### LOCAL FILE STORAGE

# In addition to storing backups remotely, you can also keep local copies.
# Pass a container-local path to store your backups if needed. You also need to
# mount a local folder or Docker volume into that location (`/archive`
@@ -286,10 +472,12 @@ You can populate below template according to your requirements and use it as you
# for such files, or to configure BACKUP_PRUNING_PREFIX to limit
# removal to certain files.

# Define this value to enable automatic rotation of old backups. The value
# declares the number of days for which a backup is kept.
# Pass zero or a positive integer value to enable automatic rotation of
# old backups. The value declares the number of days for which a backup is kept.

# BACKUP_RETENTION_DAYS="7"
# BACKUP_RETENTION_DAYS="-1"

# ---

# In case the duration a backup takes fluctuates noticeably in your setup
# you can adjust this setting to make sure there are no race conditions
@@ -301,6 +489,8 @@ You can populate below template according to your requirements and use it as you

# BACKUP_PRUNING_LEEWAY="1m"

# ---

# In case your target bucket or directory contains other files than the ones
# managed by this container, you can limit the scope of rotation by setting
# a prefix value. This would usually be the non-parametrized part of your
@@ -308,13 +498,37 @@ You can populate below template according to your requirements and use it as you
# you can set BACKUP_PRUNING_PREFIX to `db-backup-` and make sure
# unrelated files are not affected by the rotation mechanism.

# BACKUP_PRUNING_PREFIX="backup-"
# BACKUP_PRUNING_PREFIX=""

########### BACKUP ENCRYPTION

# Backups can be encrypted using gpg in case a passphrase is given.
# All of the encryption options are mutually exclusive. Provide a single option
# for the encryption scheme of your choice.

# GPG_PASSPHRASE="<xxx>"
# Backups can be encrypted symmetrically using gpg in case a passphrase is given.

# GPG_PASSPHRASE=""

# ---

# Backups can be encrypted asymmetrically using gpg in case public keys are given.
# You can use pipe syntax to pass a multiline value.

# GPG_PUBLIC_KEY_RING=""

# ---

# Backups can be encrypted symmetrically using age in case a passphrase is given.

# AGE_PASSPHRASE=""

# ---

# Backups can be encrypted asymmetrically using age in case public keys are given.
# Multiple keys need to be provided as a comma separated list. Right now, this
# supports `age` and `ssh` keys.

# AGE_PUBLIC_KEYS=""

########### STOPPING CONTAINERS AND SERVICES DURING BACKUP

@@ -322,18 +536,24 @@ You can populate below template according to your requirements and use it as you
# `docker-volume-backup.stop-during-backup` label. By default, all containers and
# services that are labeled with `true` will be stopped. If you need more fine
# grained control (e.g. when running multiple containers based on this image),
# you can override this default by specifying a different value here.
# BACKUP_STOP_DURING_BACKUP_LABEL="service1"
# you can override this default by specifying a different string value here.
# BACKUP_STOP_DURING_BACKUP_LABEL="true"

# Containers or services can also be stopped for the duration of the backup
# without being restarted afterwards by applying a
# `docker-volume-backup.stop-during-backup-no-restart` label. This behaves the
# same as `docker-volume-backup.stop-during-backup` but is mutually exclusive and
# skips restarting the container or service once the backup has finished.
# BACKUP_STOP_DURING_BACKUP_NO_RESTART_LABEL="true"

# When trying to scale down Docker Swarm services, give up after
# the specified amount of time in case the service has not converged yet.
# In case you need to adjust this timeout, supply a duration
# value as per https://pkg.go.dev/time#ParseDuration to `BACKUP_STOP_SERVICE_TIMEOUT`.
# Defaults to 5 minutes.

# BACKUP_STOP_SERVICE_TIMEOUT="5m"

########### EXECUTING COMMANDS IN CONTAINERS PRE/POST BACKUP
########### EXECUTING COMMANDS IN CONTAINERS DURING THE BACKUP LIFECYCLE

# It is possible to define commands to be run in any container before and after
# a backup is conducted. The commands themselves are defined in labels like
@@ -344,29 +564,34 @@ You can populate below template according to your requirements and use it as you
# is configured to be "true", command execution output will be forwarded to
# the backup container's stdout and stderr.

# EXEC_FORWARD_OUTPUT="true"
# EXEC_FORWARD_OUTPUT="false"

# ---

# Without any further configuration, all commands defined in labels will be
# run before and after a backup. If you need more fine grained control, you
# can use this option to set a label that will be used for narrowing down
# the set of eligible containers. When set, an eligible container will also need
# to be labeled as `docker-volume-backup.exec-label=database`.
# the set of eligible containers. E.g. when setting this to `database`,
# an eligible container will also need to be labeled as `docker-volume-backup.exec-label=database`.

# EXEC_LABEL="database"
# EXEC_LABEL=""

########### NOTIFICATIONS

# Notifications (email, Slack, etc.) can be sent out when a backup run finishes.
# Configuration is provided as a comma-separated list of URLs as consumed
# by `shoutrrr`: https://containrrr.dev/shoutrrr/0.7/services/overview/
# by `shoutrrr`: https://shoutrrr.nickfedor.com/v0.10.3/services/overview/
# The content of such notifications can be customized. Dedicated documentation
# on how to do this can be found in the README. When providing multiple URLs or
# an URL that contains a comma, the values can be URL encoded to avoid ambiguities.

# The below URL demonstrates how to send an email using the provided SMTP
# The following example URL demonstrates how to send an email using the provided SMTP
# configuration and credentials.
# Example: "smtp://username:password@host:587/?fromAddress=sender@example.com&toAddresses=recipient@example.com"

# NOTIFICATION_URLS=smtp://username:password@host:587/?fromAddress=sender@example.com&toAddresses=recipient@example.com
# NOTIFICATION_URLS=""

# ---

# By default, notifications would only be sent out when a backup run fails
# To receive notifications for every run, set `NOTIFICATION_LEVEL` to `info`
@@ -378,8 +603,9 @@ You can populate below template according to your requirements and use it as you

# If you are interfacing with Docker via TCP you can set the Docker host here
# instead of mounting the Docker socket as a volume. This is unset by default.
# Example: "tcp://docker_socket_proxy:2375"

# DOCKER_HOST="tcp://docker_socket_proxy:2375"
# DOCKER_HOST=""

########### LOCK_TIMEOUT

@@ -406,20 +632,25 @@ You can populate below template according to your requirements and use it as you
# The recipient(s) of the notification. Supply a comma separated list
# of addresses if you want to notify multiple recipients. If this is
# not set, no emails will be sent.
# Example: "you@example.com"

# EMAIL_NOTIFICATION_RECIPIENT="you@example.com"
# EMAIL_NOTIFICATION_RECIPIENT=""

# The "From" header of the sent email. Defaults to `noreply@nohost`.
# ---

# EMAIL_NOTIFICATION_SENDER="no-reply@example.com"
# The "From" header of the sent email.
# Example: "no-reply@example.com"

# EMAIL_NOTIFICATION_SENDER="noreply@nohost"

# ---

# Configuration and credentials for the SMTP server to be used.
# EMAIL_SMTP_PORT defaults to 587.

# EMAIL_SMTP_HOST="posteo.de"
# EMAIL_SMTP_PASSWORD="<xxx>"
# EMAIL_SMTP_USERNAME="no-reply@example.com"
# EMAIL_SMTP_PORT="<port>"
# EMAIL_SMTP_HOST=""
# EMAIL_SMTP_PASSWORD=""
# EMAIL_SMTP_USERNAME=""
# EMAIL_SMTP_PORT="587"
```
{% endraw %}

@@ -1,26 +0,0 @@
#!/bin/sh

# Copyright 2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: MPL-2.0

set -e

if [ ! -d "/etc/dockervolumebackup/conf.d" ]; then
  BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"

  echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
  echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
else
  echo "/etc/dockervolumebackup/conf.d was found, using configuration files from this directory."

  crontab -r && crontab /dev/null
  for file in /etc/dockervolumebackup/conf.d/*; do
    source $file
    BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
    echo "Appending cron.d entry with expression $BACKUP_CRON_EXPRESSION and configuration file $file"
    (crontab -l; echo "$BACKUP_CRON_EXPRESSION /bin/sh -c 'set -a; source $file; set +a && backup' 2>&1") | crontab -
  done
fi

echo "Starting cron in foreground."
crond -f -d 8
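
This entrypoint, deleted in this comparison, delegated scheduling to `crond`. The updated go.mod below adds `github.com/robfig/cron/v3`, which suggests scheduling moved in-process. A hedged sketch of that pattern — an illustration, not the project's actual scheduler code:

```go
package main

import (
	"log"

	"github.com/robfig/cron/v3"
)

func main() {
	// runBackup stands in for whatever the scheduler invokes per configuration.
	runBackup := func() { log.Println("running backup") }

	c := cron.New()
	// "@daily" mirrors the BACKUP_CRON_EXPRESSION default in the script above.
	if _, err := c.AddFunc("@daily", runBackup); err != nil {
		log.Fatal(err)
	}
	c.Start()
	select {} // block forever, like crond -f
}
```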

122 go.mod
@@ -1,75 +1,111 @@
module github.com/offen/docker-volume-backup

go 1.21
go 1.25.3

require (
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
	github.com/containrrr/shoutrrr v0.7.1
	filippo.io/age v1.2.1
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3
	github.com/cosiner/argv v0.1.0
	github.com/docker/cli v24.0.1+incompatible
	github.com/docker/docker v24.0.7+incompatible
	github.com/gofrs/flock v0.8.1
	github.com/klauspost/compress v1.17.6
	github.com/docker/cli v28.5.1+incompatible
	github.com/docker/docker v28.3.3+incompatible
	github.com/gofrs/flock v0.13.0
	github.com/joho/godotenv v1.5.1
	github.com/klauspost/compress v1.18.1
	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
	github.com/minio/minio-go/v7 v7.0.66
	github.com/minio/minio-go/v7 v7.0.95
	github.com/nicholas-fedor/shoutrrr v0.11.0
	github.com/offen/envconfig v1.5.0
	github.com/otiai10/copy v1.14.0
	github.com/pkg/sftp v1.13.6
	github.com/studio-b12/gowebdav v0.9.0
	golang.org/x/crypto v0.18.0
	golang.org/x/oauth2 v0.16.0
	golang.org/x/sync v0.6.0
	github.com/otiai10/copy v1.14.1
	github.com/pkg/sftp v1.13.10
	github.com/robfig/cron/v3 v3.0.1
	github.com/studio-b12/gowebdav v0.11.0
	golang.org/x/crypto v0.43.0
	golang.org/x/oauth2 v0.32.0
	golang.org/x/sync v0.17.0
	google.golang.org/api v0.253.0
	mvdan.cc/sh/v3 v3.12.0
)

require (
	cloud.google.com/go/auth v0.17.0 // indirect
	cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
	cloud.google.com/go/compute/metadata v0.9.0 // indirect
	filippo.io/edwards25519 v1.1.0 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
	github.com/cloudflare/circl v1.3.7 // indirect
	github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/protobuf v1.31.0 // indirect
	github.com/cloudflare/circl v1.6.1 // indirect
	github.com/containerd/errdefs v1.0.0 // indirect
	github.com/containerd/errdefs/pkg v0.3.0 // indirect
	github.com/containerd/log v0.1.0 // indirect
	github.com/containerd/platforms v0.2.1 // indirect
	github.com/distribution/reference v0.6.0 // indirect
	github.com/docker/distribution v2.8.3+incompatible // indirect
	github.com/docker/docker-credential-helpers v0.9.3 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fvbommel/sortorder v1.1.0 // indirect
	github.com/go-ini/ini v1.67.0 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/goccy/go-json v0.10.5 // indirect
	github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
	github.com/google/s2a-go v0.1.9 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
	github.com/googleapis/gax-go/v2 v2.15.0 // indirect
	github.com/mattn/go-runewidth v0.0.16 // indirect
	github.com/minio/crc64nvme v1.0.2 // indirect
	github.com/moby/docker-image-spec v1.3.1 // indirect
	github.com/moby/sys/atomicwriter v0.1.0 // indirect
	github.com/otiai10/mint v1.6.3 // indirect
	github.com/philhofer/fwd v1.2.0 // indirect
	github.com/rivo/uniseg v0.2.0 // indirect
	github.com/theupdateframework/notary v0.7.0 // indirect
	github.com/tinylib/msgp v1.3.0 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
	go.opentelemetry.io/otel v1.37.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.37.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect
	go.opentelemetry.io/otel/metric v1.37.0 // indirect
	go.opentelemetry.io/otel/trace v1.37.0 // indirect
	go.opentelemetry.io/proto/otlp v1.7.0 // indirect
	golang.org/x/term v0.36.0 // indirect
	golang.org/x/time v0.14.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20251014184007-4626949a642f // indirect
	google.golang.org/grpc v1.76.0 // indirect
	google.golang.org/protobuf v1.36.10 // indirect
)

require (
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
	github.com/Microsoft/go-winio v0.5.2 // indirect
	github.com/ProtonMail/go-crypto v1.1.0-alpha.0
	github.com/docker/distribution v2.8.2+incompatible // indirect
	github.com/ProtonMail/go-crypto v1.3.0
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-units v0.4.0 // indirect
	github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/fatih/color v1.13.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/google/uuid v1.5.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/klauspost/cpuid/v2 v2.2.11 // indirect
	github.com/klauspost/pgzip v1.2.6
	github.com/kr/fs v0.1.0 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.16 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/minio/md5-simd v1.1.2 // indirect
	github.com/minio/sha256-simd v1.0.1 // indirect
	github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/morikuni/aec v1.0.0 // indirect
	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
	github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/rs/xid v1.5.0 // indirect
	github.com/rs/xid v1.6.0 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
	golang.org/x/net v0.20.0 // indirect
	golang.org/x/sys v0.16.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	golang.org/x/net v0.46.0 // indirect
	golang.org/x/sys v0.37.0 // indirect
	golang.org/x/text v0.30.0 // indirect
	gotest.tools/v3 v3.0.3 // indirect
)

43 internal/errwrap/wrap.go Normal file
@@ -0,0 +1,43 @@
// Copyright 2024 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package errwrap

import (
	"errors"
	"fmt"
	"runtime"
	"strings"
)

// Wrap wraps the given error using the given message while prepending
// the name of the calling function, creating a poor man's stack trace
func Wrap(err error, msg string) error {
	pc := make([]uintptr, 15)
	n := runtime.Callers(2, pc)
	frames := runtime.CallersFrames(pc[:n])
	frame, _ := frames.Next()
	// strip full import paths and just use the package name
	chunks := strings.Split(frame.Function, "/")
	withCaller := fmt.Sprintf("%s: %s", chunks[len(chunks)-1], msg)
	if err == nil {
		return errors.New(withCaller)
	}
	return fmt.Errorf("%s: %w", withCaller, err)
}

// Unwrap receives an error and returns the last error in the chain of
// wrapped errors
func Unwrap(err error) error {
	if err == nil {
		return nil
	}
	for {
		u := errors.Unwrap(err)
		if u == nil {
			break
		}
		err = u
	}
	return err
}
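
To make the wrapping behavior concrete, here is a minimal usage sketch (illustration only; `internal/errwrap` is importable like this only from within this module):

```go
package main

import (
	"fmt"

	"github.com/offen/docker-volume-backup/internal/errwrap"
)

func fetch() error {
	// Wrapping a nil error still yields an error carrying the caller's name.
	return errwrap.Wrap(nil, "unexpected status code")
}

func main() {
	err := errwrap.Wrap(fetch(), "error fetching resource")
	fmt.Println(err)
	// Output resembles: main.main: error fetching resource: main.fetch: unexpected status code

	// Unwrap walks to the innermost error of the chain.
	fmt.Println(errwrap.Unwrap(err)) // main.fetch: unexpected status code
}
```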
@@ -1,4 +1,4 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package azure
@@ -9,6 +9,7 @@ import (
	"errors"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"
	"sync"
@@ -17,13 +18,17 @@ import (

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
)

type azureBlobStorage struct {
	*storage.StorageBackend
	client              *azblob.Client
	uploadStreamOptions *blockblob.UploadStreamOptions
	containerName       string
}

@@ -32,19 +37,25 @@ type Config struct {
	AccountName       string
	ContainerName     string
	PrimaryAccountKey string
	ConnectionString  string
	Endpoint          string
	RemotePath        string
	AccessTier        string
}

// NewStorageBackend creates and initializes a new Azure Blob Storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
	if opts.PrimaryAccountKey != "" && opts.ConnectionString != "" {
		return nil, errwrap.Wrap(nil, "using primary account key and connection string are mutually exclusive")
	}

	endpointTemplate, err := template.New("endpoint").Parse(opts.Endpoint)
	if err != nil {
		return nil, fmt.Errorf("NewStorageBackend: error parsing endpoint template: %w", err)
		return nil, errwrap.Wrap(err, "error parsing endpoint template")
	}
	var ep bytes.Buffer
	if err := endpointTemplate.Execute(&ep, opts); err != nil {
		return nil, fmt.Errorf("NewStorageBackend: error executing endpoint template: %w", err)
		return nil, errwrap.Wrap(err, "error executing endpoint template")
	}
	normalizedEndpoint := fmt.Sprintf("%s/", strings.TrimSuffix(ep.String(), "/"))

@@ -52,26 +63,48 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
	if opts.PrimaryAccountKey != "" {
		cred, err := azblob.NewSharedKeyCredential(opts.AccountName, opts.PrimaryAccountKey)
		if err != nil {
			return nil, fmt.Errorf("NewStorageBackend: error creating shared key Azure credential: %w", err)
			return nil, errwrap.Wrap(err, "error creating shared key Azure credential")
		}

		client, err = azblob.NewClientWithSharedKeyCredential(normalizedEndpoint, cred, nil)
		if err != nil {
			return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
			return nil, errwrap.Wrap(err, "error creating azure client from primary account key")
		}
	} else if opts.ConnectionString != "" {
		client, err = azblob.NewClientFromConnectionString(opts.ConnectionString, nil)
		if err != nil {
			return nil, errwrap.Wrap(err, "error creating azure client from connection string")
		}
	} else {
		cred, err := azidentity.NewManagedIdentityCredential(nil)
		if err != nil {
			return nil, fmt.Errorf("NewStorageBackend: error creating managed identity credential: %w", err)
			return nil, errwrap.Wrap(err, "error creating managed identity credential")
		}
		client, err = azblob.NewClient(normalizedEndpoint, cred, nil)
		if err != nil {
			return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
			return nil, errwrap.Wrap(err, "error creating azure client from managed identity")
		}
	}

	var uploadStreamOptions *blockblob.UploadStreamOptions
	if opts.AccessTier != "" {
		var found bool
		for _, t := range blob.PossibleAccessTierValues() {
			if string(t) == opts.AccessTier {
				found = true
				uploadStreamOptions = &blockblob.UploadStreamOptions{
					AccessTier: &t,
				}
			}
		}
		if !found {
			return nil, errwrap.Wrap(nil, fmt.Sprintf("%s is not a possible access tier value", opts.AccessTier))
		}
	}

	storage := azureBlobStorage{
		client:              client,
		uploadStreamOptions: uploadStreamOptions,
		containerName:       opts.ContainerName,
		StorageBackend: &storage.StorageBackend{
			DestinationPath: opts.RemotePath,
@@ -90,17 +123,18 @@ func (b *azureBlobStorage) Name() string {
func (b *azureBlobStorage) Copy(file string) error {
	fileReader, err := os.Open(file)
	if err != nil {
		return fmt.Errorf("(*azureBlobStorage).Copy: error opening file %s: %w", file, err)
		return errwrap.Wrap(err, fmt.Sprintf("error opening file %s", file))
	}

	_, err = b.client.UploadStream(
		context.Background(),
		b.containerName,
		filepath.Join(b.DestinationPath, filepath.Base(file)),
		path.Join(b.DestinationPath, filepath.Base(file)),
		fileReader,
		nil,
		b.uploadStreamOptions,
	)
	if err != nil {
		return fmt.Errorf("(*azureBlobStorage).Copy: error uploading file %s: %w", file, err)
		return errwrap.Wrap(err, fmt.Sprintf("error uploading file %s", file))
	}
	return nil
}
@@ -108,7 +142,7 @@ func (b *azureBlobStorage) Copy(file string) error {
// Prune rotates away backups according to the configuration and provided
// deadline for the Azure Blob storage backend.
func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	lookupPrefix := filepath.Join(b.DestinationPath, pruningPrefix)
	lookupPrefix := path.Join(b.DestinationPath, pruningPrefix)
	pager := b.client.NewListBlobsFlatPager(b.containerName, &container.ListBlobsFlatOptions{
		Prefix: &lookupPrefix,
	})
@@ -117,7 +151,7 @@ func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*sto
	for pager.More() {
		resp, err := pager.NextPage(context.Background())
		if err != nil {
			return nil, fmt.Errorf("(*azureBlobStorage).Prune: error paging over blobs: %w", err)
			return nil, errwrap.Wrap(err, "error paging over blobs")
		}
		for _, v := range resp.Segment.BlobItems {
			totalCount++

@@ -7,13 +7,13 @@ import (
	"net/url"
	"os"
	"path"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
	"golang.org/x/oauth2"
)
@@ -51,7 +51,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
	tkSource := conf.TokenSource(context.Background(), &oauth2.Token{RefreshToken: opts.RefreshToken})
	token, err := tkSource.Token()
	if err != nil {
		return nil, fmt.Errorf("(*dropboxStorage).NewStorageBackend: Error refreshing token: %w", err)
		return nil, errwrap.Wrap(err, "error refreshing token")
	}

	dbxConfig := dropbox.Config{
@@ -87,7 +87,7 @@ func (b *dropboxStorage) Name() string {
}

// Copy copies the given file to the Dropbox storage backend.
func (b *dropboxStorage) Copy(file string) error {
func (b *dropboxStorage) Copy(file string) (returnErr error) {
	_, name := path.Split(file)

	folderArg := files.NewCreateFolderArg(b.DestinationPath)
@@ -95,29 +95,34 @@ func (b *dropboxStorage) Copy(file string) error {
		switch err := err.(type) {
		case files.CreateFolderV2APIError:
			if err.EndpointError.Path.Tag != files.WriteErrorConflict {
				return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
				returnErr = errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s'", b.DestinationPath))
				return
			}
			b.Log(storage.LogLevelInfo, b.Name(), "Destination path '%s' already exists, no new directory required.", b.DestinationPath)
		default:
			return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
			returnErr = errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s'", b.DestinationPath))
			return
		}
	}

	r, err := os.Open(file)
	if err != nil {
		return fmt.Errorf("(*dropboxStorage).Copy: Error opening the file to be uploaded: %w", err)
		returnErr = errwrap.Wrap(err, "error opening the file to be uploaded")
		return
	}
	defer r.Close()
	defer func() {
		returnErr = r.Close()
	}()

	// Start new upload session and get session id

	b.Log(storage.LogLevelInfo, b.Name(), "Starting upload session for backup '%s' at path '%s'.", file, b.DestinationPath)

	var sessionId string
	uploadSessionStartArg := files.NewUploadSessionStartArg()
	uploadSessionStartArg.SessionType = &files.UploadSessionType{Tagged: dropbox.Tagged{Tag: files.UploadSessionTypeConcurrent}}
	if res, err := b.client.UploadSessionStart(uploadSessionStartArg, nil); err != nil {
		return fmt.Errorf("(*dropboxStorage).Copy: Error starting the upload session: %w", err)
		returnErr = errwrap.Wrap(err, "error starting the upload session")
		return
	} else {
		sessionId = res.SessionId
	}
@@ -165,7 +170,7 @@ loop:

			bytesRead, err := r.Read(chunk)
			if err != nil {
				errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error reading the file to be uploaded: %w", err)
				errorChn <- errwrap.Wrap(err, "error reading the file to be uploaded")
				mu.Unlock()
				return
			}
@@ -184,7 +189,7 @@ loop:
			mu.Unlock()

			if err := b.client.UploadSessionAppendV2(uploadSessionAppendArg, bytes.NewReader(chunk)); err != nil {
				errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error appending the file to the upload session: %w", err)
				errorChn <- errwrap.Wrap(err, "error appending the file to the upload session")
				return
			}
		}()
@@ -195,10 +200,11 @@ loop:
	_, err = b.client.UploadSessionFinish(
		files.NewUploadSessionFinishArg(
			files.NewUploadSessionCursor(sessionId, 0),
			files.NewCommitInfo(filepath.Join(b.DestinationPath, name)),
			files.NewCommitInfo(path.Join(b.DestinationPath, name)),
		), nil)
	if err != nil {
		return fmt.Errorf("(*dropboxStorage).Copy: Error finishing the upload session: %w", err)
		returnErr = errwrap.Wrap(err, "error finishing the upload session")
		return
	}

	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' at path '%s'.", file, b.DestinationPath)
@@ -211,14 +217,14 @@ func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*stora
	var entries []files.IsMetadata
	res, err := b.client.ListFolder(files.NewListFolderArg(b.DestinationPath))
	if err != nil {
		return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
		return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
	}
	entries = append(entries, res.Entries...)

	for res.HasMore {
		res, err = b.client.ListFolderContinue(files.NewListFolderContinueArg(res.Cursor))
		if err != nil {
			return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
			return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
		}
		entries = append(entries, res.Entries...)
	}
@@ -247,8 +253,8 @@ func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*stora

	pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error {
		for _, match := range matches {
			if _, err := b.client.DeleteV2(files.NewDeleteArg(filepath.Join(b.DestinationPath, match.Name))); err != nil {
				return fmt.Errorf("(*dropboxStorage).Prune: Error removing file from Dropbox storage: %w", err)
			if _, err := b.client.DeleteV2(files.NewDeleteArg(path.Join(b.DestinationPath, match.Name))); err != nil {
				return errwrap.Wrap(err, "error removing file from Dropbox storage")
			}
		}
		return nil

178 internal/storage/googledrive/googledrive.go Normal file
@@ -0,0 +1,178 @@
// Copyright 2025 - The Gemini CLI authors <gemini-cli@google.com>
// SPDX-License-Identifier: MPL-2.0

package googledrive

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	"crypto/tls"
	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/drive/v3"
	"google.golang.org/api/option"
	"net/http"
)

type googleDriveStorage struct {
	storage.StorageBackend
	client *drive.Service
}

// Config allows configuring a Google Drive storage backend.
type Config struct {
	CredentialsJSON    string
	FolderID           string
	ImpersonateSubject string
	Endpoint           string
	TokenURL           string
}

// NewStorageBackend creates and initializes a new Google Drive storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
	ctx := context.Background()

	credentialsBytes := []byte(opts.CredentialsJSON)

	config, err := google.JWTConfigFromJSON(credentialsBytes, drive.DriveScope)
	if err != nil {
		return nil, errwrap.Wrap(err, "unable to parse credentials")
	}
	if opts.ImpersonateSubject != "" {
		config.Subject = opts.ImpersonateSubject
	}
	if opts.TokenURL != "" {
		config.TokenURL = opts.TokenURL
	}

	var clientOptions []option.ClientOption
	if opts.Endpoint != "" {
		clientOptions = append(clientOptions, option.WithEndpoint(opts.Endpoint))
		// Insecure transport for http mock server
		insecureTransport := &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
		insecureClient := &http.Client{Transport: insecureTransport}
		ctx = context.WithValue(ctx, oauth2.HTTPClient, insecureClient)
	}
	clientOptions = append(clientOptions, option.WithTokenSource(config.TokenSource(ctx)))

	srv, err := drive.NewService(ctx, clientOptions...)
	if err != nil {
		return nil, errwrap.Wrap(err, "unable to create Drive client")
	}

	return &googleDriveStorage{
		StorageBackend: storage.StorageBackend{
			DestinationPath: opts.FolderID,
			Log:             logFunc,
		},
		client: srv,
	}, nil
}

// Name returns the name of the storage backend
func (b *googleDriveStorage) Name() string {
	return "GoogleDrive"
}

// Copy copies the given file to the Google Drive storage backend.
func (b *googleDriveStorage) Copy(file string) (returnErr error) {
	_, name := filepath.Split(file)
	b.Log(storage.LogLevelInfo, b.Name(), "Starting upload for backup '%s'.", name)

	f, err := os.Open(file)
	if err != nil {
		returnErr = errwrap.Wrap(err, fmt.Sprintf("failed to open file %s", file))
		return
	}
	defer func() {
		returnErr = f.Close()
	}()

	driveFile := &drive.File{Name: name}
	if b.DestinationPath != "" {
		driveFile.Parents = []string{b.DestinationPath}
	} else {
		driveFile.Parents = []string{"root"}
	}

	createCall := b.client.Files.Create(driveFile).SupportsAllDrives(true).Fields("id")
	created, err := createCall.Media(f).Do()
	if err != nil {
		returnErr = errwrap.Wrap(err, fmt.Sprintf("failed to upload %s", name))
		return
	}

	b.Log(storage.LogLevelInfo, b.Name(), "Finished upload for %s. File ID: %s", name, created.Id)
	return nil
}

// Prune rotates away backups according to the configuration and provided deadline for the Google Drive storage backend.
func (b *googleDriveStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	parentID := b.DestinationPath
	if parentID == "" {
		parentID = "root"
	}

	query := fmt.Sprintf("name contains '%s' and trashed = false", pruningPrefix)
	if parentID != "root" {
		query = fmt.Sprintf("'%s' in parents and (%s)", parentID, query)
	}

	var allFiles []*drive.File
	pageToken := ""
	for {
		req := b.client.Files.List().Q(query).SupportsAllDrives(true).Fields("files(id, name, createdTime, parents)").PageToken(pageToken)
		res, err := req.Do()
		if err != nil {
			return nil, errwrap.Wrap(err, "listing files")
		}
		allFiles = append(allFiles, res.Files...)
		pageToken = res.NextPageToken
		if pageToken == "" {
			break
		}
	}

	var matches []*drive.File
	var lenCandidates int
	for _, f := range allFiles {
		if !strings.HasPrefix(f.Name, pruningPrefix) {
			continue
		}
		lenCandidates++
		created, err := time.Parse(time.RFC3339, f.CreatedTime)
		if err != nil {
			b.Log(storage.LogLevelWarning, b.Name(), "Could not parse time for backup %s: %v", f.Name, err)
			continue
		}
		if created.Before(deadline) {
			matches = append(matches, f)
		}
	}

	stats := &storage.PruneStats{
		Total:  uint(lenCandidates),
		Pruned: uint(len(matches)),
	}

	pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error {
		for _, file := range matches {
			b.Log(storage.LogLevelInfo, b.Name(), "Deleting old backup file: %s", file.Name)
			if err := b.client.Files.Delete(file.Id).SupportsAllDrives(true).Do(); err != nil {
				b.Log(storage.LogLevelWarning, b.Name(), "Error deleting %s: %v", file.Name, err)
			}
		}
		return nil
	})

	return stats, pruneErr
}
@@ -1,4 +1,4 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package local
@@ -12,6 +12,7 @@ import (
	"path/filepath"
	"time"

	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
)

@@ -47,17 +48,19 @@ func (b *localStorage) Copy(file string) error {
	_, name := path.Split(file)

	if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
		return fmt.Errorf("(*localStorage).Copy: Error copying file to archive: %w", err)
		return errwrap.Wrap(err, "error copying file to archive")
	}
	b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in `%s`.", file, b.DestinationPath)

	if b.latestSymlink != "" {
		symlink := path.Join(b.DestinationPath, b.latestSymlink)
		if _, err := os.Lstat(symlink); err == nil {
			os.Remove(symlink)
			if err := os.Remove(symlink); err != nil {
				return errwrap.Wrap(err, "error removing existing symlink")
			}
		}
		if err := os.Symlink(name, symlink); err != nil {
			return fmt.Errorf("(*localStorage).Copy: error creating latest symlink: %w", err)
			return errwrap.Wrap(err, "error creating latest symlink")
		}
		b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
	}
@@ -73,10 +76,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
	)
	globMatches, err := filepath.Glob(globPattern)
	if err != nil {
		return nil, fmt.Errorf(
			"(*localStorage).Prune: Error looking up matching files using pattern %s: %w",
			globPattern,
		return nil, errwrap.Wrap(
			err,
			fmt.Sprintf(
				"error looking up matching files using pattern %s",
				globPattern,
			),
		)
	}

@@ -84,14 +89,16 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
	for _, candidate := range globMatches {
		fi, err := os.Lstat(candidate)
		if err != nil {
			return nil, fmt.Errorf(
				"(*localStorage).Prune: Error calling Lstat on file %s: %w",
				candidate,
			return nil, errwrap.Wrap(
				err,
				fmt.Sprintf(
					"error calling Lstat on file %s",
					candidate,
				),
			)
		}

		if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
		if !fi.IsDir() && fi.Mode()&os.ModeSymlink != os.ModeSymlink {
			candidates = append(candidates, candidate)
		}
	}
@@ -100,10 +107,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
	for _, candidate := range candidates {
		fi, err := os.Stat(candidate)
		if err != nil {
			return nil, fmt.Errorf(
				"(*localStorage).Prune: Error calling stat on file %s: %w",
				candidate,
			return nil, errwrap.Wrap(
				err,
				fmt.Sprintf(
					"error calling stat on file %s",
					candidate,
				),
			)
		}
		if fi.ModTime().Before(deadline) {
@@ -124,10 +133,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
			}
		}
		if len(removeErrors) != 0 {
			return fmt.Errorf(
				"(*localStorage).Prune: %d error(s) deleting files, starting with: %w",
				len(removeErrors),
			return errwrap.Wrap(
				errors.Join(removeErrors...),
				fmt.Sprintf(
					"%d error(s) deleting files",
					len(removeErrors),
				),
			)
		}
		return nil
@@ -137,22 +148,25 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
}

// copyFile creates a copy of the file located at `src` at `dst`.
func copyFile(src, dst string) error {
func copyFile(src, dst string) (returnErr error) {
	in, err := os.Open(src)
	if err != nil {
		return err
		returnErr = err
		return
	}
	defer in.Close()
	defer func() {
		returnErr = in.Close()
	}()

	out, err := os.Create(dst)
	if err != nil {
		return err
		returnErr = err
		return
	}

	_, err = io.Copy(out, in)
	if err != nil {
		out.Close()
		return err
		return errors.Join(err, out.Close())
	}
	return out.Close()
}
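
One thing worth noting about the new `copyFile`: the deferred closure assigns `in.Close()`'s result to `returnErr` unconditionally, so it can replace an earlier error (or clear one with nil). A hedged sketch — a hypothetical variant, not this repository's code — that joins instead of overwriting:

```go
package main

import (
	"errors"
	"io"
	"os"
)

// copyFileJoined is a hypothetical variant of copyFile above: close errors
// are accumulated into the return value with errors.Join rather than
// replacing whatever error was already set.
func copyFileJoined(src, dst string) (returnErr error) {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer func() {
		returnErr = errors.Join(returnErr, in.Close())
	}()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	_, err = io.Copy(out, in)
	return errors.Join(err, out.Close())
}

func main() {
	if err := copyFileJoined("src.txt", "dst.txt"); err != nil {
		panic(err)
	}
}
```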
@@ -1,4 +1,4 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package s3
@@ -10,11 +10,11 @@ import (
	"fmt"
	"os"
	"path"
	"path/filepath"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
)

@@ -53,7 +53,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
	} else if opts.IamRoleEndpoint != "" {
		creds = credentials.NewIAM(opts.IamRoleEndpoint)
	} else {
		return nil, errors.New("NewStorageBackend: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
		return nil, errwrap.Wrap(nil, "AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
	}

	options := minio.Options{
@@ -63,12 +63,12 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error

	transport, err := minio.DefaultTransport(true)
	if err != nil {
		return nil, fmt.Errorf("NewStorageBackend: failed to create default minio transport: %w", err)
		return nil, errwrap.Wrap(err, "failed to create default minio transport")
	}

	if opts.EndpointInsecure {
		if !options.Secure {
			return nil, errors.New("NewStorageBackend: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
			return nil, errwrap.Wrap(nil, "AWS_ENDPOINT_INSECURE = true is only meaningful for https")
		}
		transport.TLSClientConfig.InsecureSkipVerify = true
	} else if opts.CACert != nil {
@@ -81,7 +81,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error

	mc, err := minio.New(opts.Endpoint, &options)
	if err != nil {
		return nil, fmt.Errorf("NewStorageBackend: error setting up minio client: %w", err)
		return nil, errwrap.Wrap(err, "error setting up minio client")
	}

	return &s3Storage{
@@ -112,27 +112,30 @@ func (b *s3Storage) Copy(file string) error {
	if b.partSize > 0 {
		srcFileInfo, err := os.Stat(file)
		if err != nil {
			return fmt.Errorf("(*s3Storage).Copy: error reading the local file: %w", err)
			return errwrap.Wrap(err, "error reading the local file")
		}

		_, partSize, _, err := minio.OptimalPartInfo(srcFileInfo.Size(), uint64(b.partSize*1024*1024))
		if err != nil {
			return fmt.Errorf("(*s3Storage).Copy: error computing the optimal s3 part size: %w", err)
			return errwrap.Wrap(err, "error computing the optimal s3 part size")
		}

		putObjectOptions.PartSize = uint64(partSize)
	}

	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
	if _, err := b.client.FPutObject(context.Background(), b.bucket, path.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
		if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
			return fmt.Errorf(
				"(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
			return errwrap.Wrap(
				nil,
				fmt.Sprintf(
					"error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
					errResp.Message,
					errResp.Code,
					errResp.StatusCode,
				),
			)
		}
		return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
		return errwrap.Wrap(err, "error uploading backup to remote storage")
	}

	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
@@ -143,7 +146,7 @@ func (b *s3Storage) Copy(file string) error {
// Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend.
func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	candidates := b.client.ListObjects(context.Background(), b.bucket, minio.ListObjectsOptions{
		Prefix:    filepath.Join(b.DestinationPath, pruningPrefix),
		Prefix:    path.Join(b.DestinationPath, pruningPrefix),
		Recursive: true,
	})

@@ -152,9 +155,9 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
	for candidate := range candidates {
		lenCandidates++
		if candidate.Err != nil {
			return nil, fmt.Errorf(
				"(*s3Storage).Prune: error looking up candidates from remote storage! %w",
			return nil, errwrap.Wrap(
				candidate.Err,
				"error looking up candidates from remote storage",
			)
		}
		if candidate.LastModified.Before(deadline) {

@@ -1,4 +1,4 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package ssh
@@ -9,10 +9,10 @@ import (
	"io"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
@@ -47,20 +47,20 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
	if _, err := os.Stat(opts.IdentityFile); err == nil {
		key, err := os.ReadFile(opts.IdentityFile)
		if err != nil {
			return nil, errors.New("NewStorageBackend: error reading the private key")
			return nil, errwrap.Wrap(nil, "error reading the private key")
		}

		var signer ssh.Signer
		if opts.IdentityPassphrase != "" {
			signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(opts.IdentityPassphrase))
			if err != nil {
				return nil, errors.New("NewStorageBackend: error parsing the encrypted private key")
				return nil, errwrap.Wrap(nil, "error parsing the encrypted private key")
			}
			authMethods = append(authMethods, ssh.PublicKeys(signer))
		} else {
			signer, err = ssh.ParsePrivateKey(key)
			if err != nil {
				return nil, errors.New("NewStorageBackend: error parsing the private key")
				return nil, errwrap.Wrap(nil, "error parsing the private key")
			}
			authMethods = append(authMethods, ssh.PublicKeys(signer))
		}
@@ -74,7 +74,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
	sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)

	if err != nil {
		return nil, fmt.Errorf("NewStorageBackend: error creating ssh client: %w", err)
		return nil, errwrap.Wrap(err, "error creating ssh client")
	}
	_, _, err = sshClient.SendRequest("keepalive", false, nil)
	if err != nil {
@@ -87,7 +87,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
		sftp.MaxConcurrentRequestsPerFile(64),
	)
	if err != nil {
		return nil, fmt.Errorf("NewStorageBackend: error creating sftp client: %w", err)
		return nil, errwrap.Wrap(err, "error creating sftp client")
	}

	return &sshStorage{
@@ -107,19 +107,29 @@ func (b *sshStorage) Name() string {
}

// Copy copies the given file to the SSH storage backend.
func (b *sshStorage) Copy(file string) error {
func (b *sshStorage) Copy(file string) (returnErr error) {
	if err := b.sftpClient.MkdirAll(b.DestinationPath); err != nil {
		return errwrap.Wrap(err, "error ensuring destination directory")
	}

	source, err := os.Open(file)
	_, name := path.Split(file)
	if err != nil {
		return fmt.Errorf("(*sshStorage).Copy: error reading the file to be uploaded: %w", err)
		returnErr = errwrap.Wrap(err, "error reading the file to be uploaded")
		return
	}
	defer source.Close()
	defer func() {
		returnErr = source.Close()
	}()

	destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
	destination, err := b.sftpClient.Create(path.Join(b.DestinationPath, name))
	if err != nil {
		return fmt.Errorf("(*sshStorage).Copy: error creating file: %w", err)
		returnErr = errwrap.Wrap(err, "error creating file")
		return
	}
	defer destination.Close()
	defer func() {
		returnErr = destination.Close()
	}()

	chunk := make([]byte, 1e9)
	for {
@@ -127,27 +137,32 @@ func (b *sshStorage) Copy(file string) error {
		if err == io.EOF {
			tot, err := destination.Write(chunk[:num])
			if err != nil {
				return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
				returnErr = errwrap.Wrap(err, "error uploading the file")
				return
			}

			if tot != len(chunk[:num]) {
				return errors.New("(*sshStorage).Copy: failed to write stream")
				returnErr = errwrap.Wrap(nil, "failed to write stream")
				return
			}

			break
		}

		if err != nil {
			return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
			returnErr = errwrap.Wrap(err, "error uploading the file")
			return
		}

		tot, err := destination.Write(chunk[:num])
		if err != nil {
			return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
			returnErr = errwrap.Wrap(err, "error uploading the file")
			return
		}

		if tot != len(chunk[:num]) {
			return fmt.Errorf("(*sshStorage).Copy: failed to write stream")
			returnErr = errwrap.Wrap(nil, "failed to write stream")
			return
		}
	}
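
The manual chunk loop above is, in effect, a buffered copy. A minimal sketch — an alternative illustration, not the project's code — of expressing the same transfer through `io.CopyBuffer`, relying on the fact that both `*os.File` and `*sftp.File` satisfy `io.Reader`/`io.Writer`:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pkg/sftp"
)

// upload mirrors the chunked loop above with io.CopyBuffer; the buffer is
// only a working area, so it can be far smaller than the 1e9-byte chunk.
func upload(client *sftp.Client, local, remote string) error {
	src, err := os.Open(local)
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := client.Create(remote)
	if err != nil {
		return err
	}
	defer dst.Close()

	buf := make([]byte, 32*1024*1024) // 32 MiB working buffer
	_, err = io.CopyBuffer(dst, src, buf)
	return err
}

func main() { log.Println("illustration only; requires a connected *sftp.Client") }
```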

@@ -160,28 +175,36 @@ func (b *sshStorage) Copy(file string) error {
func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
	if err != nil {
		return nil, fmt.Errorf("(*sshStorage).Prune: error reading directory: %w", err)
		// If directory doesn't exist yet, nothing to prune
		if errors.Is(err, os.ErrNotExist) {
			return &storage.PruneStats{}, nil
		}
		return nil, errwrap.Wrap(err, "error reading directory")
	}

	var matches []string
	var numCandidates int
	for _, candidate := range candidates {
		if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
		if candidate.IsDir() || !strings.HasPrefix(candidate.Name(), pruningPrefix) {
			continue
		}

		numCandidates++
		if candidate.ModTime().Before(deadline) {
			matches = append(matches, candidate.Name())
		}
	}

	stats := &storage.PruneStats{
		Total:  uint(len(candidates)),
		Total:  uint(numCandidates),
		Pruned: uint(len(matches)),
	}

	pruneErr := b.DoPrune(b.Name(), len(matches), len(candidates), deadline, func() error {
	pruneErr := b.DoPrune(b.Name(), len(matches), numCandidates, deadline, func() error {
		for _, match := range matches {
			if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
				return fmt.Errorf("(*sshStorage).Prune: error removing file: %w", err)
			p := path.Join(b.DestinationPath, match)
			if err := b.sftpClient.Remove(p); err != nil {
				return errwrap.Wrap(err, fmt.Sprintf("error removing file %s", p))
			}
		}
		return nil

@@ -1,11 +1,12 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package storage

import (
	"fmt"
	"time"

	"github.com/offen/docker-volume-backup/internal/errwrap"
)

// Backend is an interface for defining functions which all storage providers support.
@@ -26,7 +27,6 @@ type LogLevel int
const (
	LogLevelInfo LogLevel = iota
	LogLevelWarning
	LogLevelError
)

type Log func(logType LogLevel, context string, msg string, params ...any)
@@ -47,7 +47,7 @@ func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int,

	formattedDeadline, err := deadline.Local().MarshalText()
	if err != nil {
		return fmt.Errorf("(*StorageBackend).DoPrune: error marshaling deadline: %w", err)
		return errwrap.Wrap(err, "error marshaling deadline")
	}
	b.Log(LogLevelInfo, context,
		"Pruned %d out of %d backups as they were older than the given deadline of %s.",
@@ -1,19 +1,18 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package webdav

import (
	"errors"
	"fmt"
	"io/fs"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
	"github.com/studio-b12/gowebdav"
)
@@ -36,14 +35,14 @@ type Config struct {
// NewStorageBackend creates and initializes a new WebDav storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
	if opts.Username == "" || opts.Password == "" {
		return nil, errors.New("NewStorageBackend: WEBDAV_URL is defined, but no credentials were provided")
		return nil, errwrap.Wrap(nil, "WEBDAV_URL is defined, but no credentials were provided")
	} else {
		webdavClient := gowebdav.NewClient(opts.URL, opts.Username, opts.Password)

		if opts.URLInsecure {
			defaultTransport, ok := http.DefaultTransport.(*http.Transport)
			if !ok {
				return nil, errors.New("NewStorageBackend: unexpected error when asserting type for http.DefaultTransport")
				return nil, errwrap.Wrap(nil, "unexpected error when asserting type for http.DefaultTransport")
			}
			webdavTransport := defaultTransport.Clone()
			webdavTransport.TLSClientConfig.InsecureSkipVerify = opts.URLInsecure
@@ -69,16 +68,16 @@ func (b *webDavStorage) Name() string {
func (b *webDavStorage) Copy(file string) error {
	_, name := path.Split(file)
	if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
		return fmt.Errorf("(*webDavStorage).Copy: error creating directory '%s' on server: %w", b.DestinationPath, err)
		return errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s' on server", b.DestinationPath))
	}

	r, err := os.Open(file)
	if err != nil {
		return fmt.Errorf("(*webDavStorage).Copy: error opening the file to be uploaded: %w", err)
		return errwrap.Wrap(err, "error opening the file to be uploaded")
	}

	if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
		return fmt.Errorf("(*webDavStorage).Copy: error uploading the file: %w", err)
	if err := b.client.WriteStream(path.Join(b.DestinationPath, name), r, 0644); err != nil {
		return errwrap.Wrap(err, "error uploading the file")
	}
	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to '%s' at path '%s'.", file, b.url, b.DestinationPath)

@@ -89,29 +88,30 @@ func (b *webDavStorage) Copy(file string) error {
func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	candidates, err := b.client.ReadDir(b.DestinationPath)
	if err != nil {
		return nil, fmt.Errorf("(*webDavStorage).Prune: error looking up candidates from remote storage: %w", err)
		return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
	}

	var matches []fs.FileInfo
	var lenCandidates int
	var numCandidates int
	for _, candidate := range candidates {
		if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
		if candidate.IsDir() || !strings.HasPrefix(candidate.Name(), pruningPrefix) {
			continue
		}
		lenCandidates++
		numCandidates++
		if candidate.ModTime().Before(deadline) {
			matches = append(matches, candidate)
		}
	}

	stats := &storage.PruneStats{
		Total:  uint(lenCandidates),
		Total:  uint(numCandidates),
		Pruned: uint(len(matches)),
	}

	pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error {
	pruneErr := b.DoPrune(b.Name(), len(matches), numCandidates, deadline, func() error {
		for _, match := range matches {
			if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
				return fmt.Errorf("(*webDavStorage).Prune: error removing file: %w", err)
			if err := b.client.Remove(path.Join(b.DestinationPath, match.Name())); err != nil {
				return errwrap.Wrap(err, "error removing file")
			}
		}
		return nil

@@ -1,9 +1,12 @@
FROM docker:24-dind
FROM docker:28-dind

RUN apk add \
  age \
  coreutils \
  curl \
  expect \
  gpg \
  gpg-agent \
  jq \
  moreutils \
  tar \

@@ -39,14 +39,6 @@ Setting this value lets you run tests against different existing images, so you
IMAGE_TAG=v2.30.0 ./test.sh
```

#### `NO_IMAGE_CACHE`

When set, images from remote registries will not be cached and shared between sandbox containers.

```sh
NO_IMAGE_CACHE=1 ./test.sh
```

By default, two local images are created that persist the image data and provide it to containers at runtime.

## Understanding the test setup
@@ -57,8 +49,8 @@ As the sandbox container is also expected to be torn down post test, the scripts

## Anatomy of a test case

The `test.sh` script looks for an executable file called `run.sh` in each directory.
When found, it is executed and signals success by returning a 0 exit code.
The `test.sh` script looks for all executable files in each directory.
When found, all of them are executed in series and are expected to signal success by returning a 0 exit code.
Any other exit code is considered a failure and will halt execution of further tests.

There is a `util.sh` file containing a few commonly used helpers which can be used by adding the following prelude to a new test case:
@@ -68,3 +60,13 @@ cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
```

### Running tests in swarm mode

A test case can signal it wants to run in swarm mode by placing an empty `.swarm` file inside the directory.
If the swarm setup should be composed of multiple nodes, a `.multinode` file can be used.

A multinode setup will contain one manager (`manager`) and two worker nodes (`worker1` and `worker2`).

If a test is expected to run in the context of a node other than the `manager`, you can create a `.context` file containing the name of the node you want the test to run in.
E.g. if your script `02run.sh` is expected to be run on `worker2`, create a file called `02run.sh.context` with the content `worker2`.

24 test/age-passphrase/docker-compose.yml Normal file
@@ -0,0 +1,24 @@
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_FILENAME: test.tar.gz
      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.age
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      AGE_PASSPHRASE: "Dance.0Tonight.Go.Typical"
    volumes:
      - ${LOCAL_DIR:-./local}:/archive
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
39 test/age-passphrase/run.sh Executable file
@@ -0,0 +1,39 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename "$(pwd)")

export LOCAL_DIR="$(mktemp -d)"

docker compose up -d --quiet-pull
sleep 5

docker compose exec backup backup

expect_running_containers "2"

TMP_DIR=$(mktemp -d)

# complex usage of expect(1) since age does not have a way to
# programmatically provide the passphrase
expect -i <<EOL
spawn age --decrypt -o "$LOCAL_DIR/decrypted.tar.gz" "$LOCAL_DIR/test.tar.gz.age"
expect -exact "Enter passphrase: "
send -- "Dance.0Tonight.Go.Typical\r"
sleep 1
EOL
tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C "$TMP_DIR"

if [ ! -f "$TMP_DIR/backup/app_data/offen.db" ]; then
  fail "Could not find expected file in untarred archive."
fi
rm -vf "$LOCAL_DIR/decrypted.tar.gz"

pass "Found relevant files in decrypted and untarred local backup."

if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.age" ]; then
  fail "Could not find local symlink to latest encrypted backup."
fi
1 test/age-publickey/.gitignore vendored Normal file
@@ -0,0 +1 @@
pk-*.txt
24 test/age-publickey/docker-compose.yml Normal file
@@ -0,0 +1,24 @@
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_FILENAME: test.tar.gz
      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.age
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      AGE_PUBLIC_KEYS: "${BACKUP_AGE_PUBLIC_KEYS}"
    volumes:
      - ${LOCAL_DIR:-./local}:/archive
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
test/age-publickey/run.sh (Executable file, 47 lines)
@@ -0,0 +1,47 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename "$(pwd)")

export LOCAL_DIR="$(mktemp -d)"

age-keygen >"$LOCAL_DIR/pk-a.txt"
PK_A="$(grep -E 'public key' <"$LOCAL_DIR/pk-a.txt" | cut -d: -f2 | xargs)"
age-keygen >"$LOCAL_DIR/pk-b.txt"
PK_B="$(grep -E 'public key' <"$LOCAL_DIR/pk-b.txt" | cut -d: -f2 | xargs)"

ssh-keygen -t ed25519 -m pem -f "$LOCAL_DIR/id_ed25519" -C "docker-volume-backup@local"
PK_C="$(cat $LOCAL_DIR/id_ed25519.pub)"

export BACKUP_AGE_PUBLIC_KEYS="$PK_A,$PK_B,$PK_C"

docker compose up -d --quiet-pull
sleep 5

docker compose exec backup backup

expect_running_containers "2"

do_decrypt() {
  TMP_DIR=$(mktemp -d)
  age --decrypt -i "$1" -o "$LOCAL_DIR/decrypted.tar.gz" "$LOCAL_DIR/test.tar.gz.age"
  tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C "$TMP_DIR"

  if [ ! -f "$TMP_DIR/backup/app_data/offen.db" ]; then
    fail "Could not find expected file in untarred archive."
  fi
  rm -vf "$LOCAL_DIR/decrypted.tar.gz"

  pass "Found relevant files in decrypted and untarred local backup."

  if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.age" ]; then
    fail "Could not find local symlink to latest encrypted backup."
  fi
}

do_decrypt "$LOCAL_DIR/pk-a.txt"
do_decrypt "$LOCAL_DIR/pk-b.txt"
do_decrypt "$LOCAL_DIR/id_ed25519"
@@ -1,8 +1,6 @@
-version: '3'
-
 services:
   storage:
-    image: mcr.microsoft.com/azure-storage/azurite:3.26.0
+    image: mcr.microsoft.com/azure-storage/azurite:3.35.0
     volumes:
       - ${DATA_DIR:-./data}:/data
     command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data
@@ -12,7 +10,7 @@ services:
     retries: 30

   az_cli:
-    image: mcr.microsoft.com/azure-cli:2.51.0
+    image: mcr.microsoft.com/azure-cli:2.78.0
     volumes:
       - ${LOCAL_DIR:-./local}:/dump
     command:
@@ -36,6 +34,7 @@ services:
       AZURE_STORAGE_CONTAINER_NAME: test-container
       AZURE_STORAGE_ENDPOINT: http://storage:10000/{{ .AccountName }}/
       AZURE_STORAGE_PATH: 'path/to/backup'
+      AZURE_STORAGE_ACCESS_TIER: Hot
       BACKUP_FILENAME: test.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
       BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
@@ -43,7 +42,7 @@ services:
       BACKUP_PRUNING_PREFIX: test
     volumes:
       - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

   offen:
     image: offen/offen:latest
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   minio:
     hostname: minio.local
@@ -32,7 +30,7 @@ services:
       BACKUP_PRUNING_LEEWAY: 5s
     volumes:
       - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro
       - ${CERT_DIR:-.}/rootCA.crt:/root/minio-rootCA.crt

   offen:
@@ -37,7 +37,7 @@ docker run --rm -q \
   --network test_network \
   -v app_data:/backup/app_data \
   -v empty_data:/backup/empty_data \
-  -v /var/run/docker.sock:/var/run/docker.sock \
+  -v /var/run/docker.sock:/var/run/docker.sock:ro \
   --env AWS_ACCESS_KEY_ID=test \
   --env AWS_SECRET_ACCESS_KEY=GMusLtUmILge2by+z890kQ \
   --env AWS_ENDPOINT=minio:9000 \
test/collision/.swarm (Normal file, 0 lines)
@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense

-version: '3.8'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -11,7 +9,7 @@ services:
     volumes:
       - offen_data:/backup/offen_data:ro
       - ${LOCAL_DIR:-./local}:/archive
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

   offen:
     image: offen/offen:latest
@@ -8,8 +8,6 @@ current_test=$(basename $(pwd))

 export LOCAL_DIR=$(mktemp -d)

-docker swarm init
-
 docker stack deploy --compose-file=docker-compose.yml test_stack

 while [ -z $(docker ps -q -f name=backup) ]; do

@@ -31,32 +31,3 @@ fi
 pass "Did not find unexpected file."

 docker compose down --volumes
-
-info "Running commands test in swarm mode next."
-
-export LOCAL_DIR=$(mktemp -d)
-export TMP_DIR=$(mktemp -d)
-
-docker swarm init
-
-docker stack deploy --compose-file=docker-compose.yml test_stack
-
-while [ -z $(docker ps -q -f name=backup) ]; do
-  info "Backup container not ready yet. Retrying."
-  sleep 1
-done
-
-sleep 20
-
-docker exec $(docker ps -q -f name=backup) backup
-
-tar -xvf "$LOCAL_DIR/test.tar.gz" -C $TMP_DIR
-if [ ! -f "$TMP_DIR/backup/data/dump.sql" ]; then
-  fail "Could not find file written by pre command."
-fi
-pass "Found expected file."
-
-if [ -f "$TMP_DIR/backup/data/post.txt" ]; then
-  fail "File created in post command was present in backup."
-fi
-pass "Did not find unexpected file."
test/commands/02swarm.sh (Executable file, 34 lines)
@@ -0,0 +1,34 @@
#!/bin/sh

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

export LOCAL_DIR=$(mktemp -d)
export TMP_DIR=$(mktemp -d)

docker swarm init

docker stack deploy --compose-file=docker-compose.yml test_stack

while [ -z $(docker ps -q -f name=backup) ]; do
  info "Backup container not ready yet. Retrying."
  sleep 1
done

sleep 20

docker exec $(docker ps -q -f name=backup) backup

tar -xvf "$LOCAL_DIR/test.tar.gz" -C $TMP_DIR
if [ ! -f "$TMP_DIR/backup/data/dump.sql" ]; then
  fail "Could not find file written by pre command."
fi
pass "Found expected file."

if [ -f "$TMP_DIR/backup/data/post.txt" ]; then
  fail "File created in post command was present in backup."
fi
pass "Did not find unexpected file."
@@ -1,5 +1,3 @@
-version: '3.8'
-
 services:
   database:
     image: mariadb:10.7
@@ -44,7 +42,7 @@ services:
     volumes:
       - ${LOCAL_DIR:-./local}:/archive
       - app_data:/backup/data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

 volumes:
   app_data:
@@ -1,2 +1,6 @@
 BACKUP_FILENAME="conf.tar.gz"
+# This is a comment
+# NOT=$(docker ps -aq)
+# e.g. `backup-$HOSTNAME-%Y-%m-%dT%H-%M-%S.tar.gz`. Expansion happens before`
+NAME="$EXPANSION_VALUE"
 BACKUP_CRON_EXPRESSION="*/1 * * * *"
@@ -1,2 +1,3 @@
-BACKUP_FILENAME="other.tar.gz"
+NAME="other"
 BACKUP_CRON_EXPRESSION="*/1 * * * *"
+BACKUP_FILENAME="override-$NAME.tar.gz"
@@ -1,2 +1,2 @@
-BACKUP_FILENAME="never.tar.gz"
+NAME="never"
 BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?"
@@ -1,16 +1,18 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
     restart: always
     environment:
       BACKUP_FILENAME: $$NAME.tar.gz
       BACKUP_FILENAME_EXPAND: 'true'
+      EXPANSION_VALUE: conf
     volumes:
       - ${LOCAL_DIR:-./local}:/archive
       - app_data:/backup/app_data:ro
       - ./01backup.env:/etc/dockervolumebackup/conf.d/01backup.env
       - ./02backup.env:/etc/dockervolumebackup/conf.d/02backup.env
+      - ./03never.env:/etc/dockervolumebackup/conf.d/03never.env
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

   offen:
     image: offen/offen:latest
@@ -13,12 +13,14 @@ docker compose up -d --quiet-pull
 # sleep until a backup is guaranteed to have happened on the 1 minute schedule
 sleep 100

+docker compose logs backup
+
 if [ ! -f "$LOCAL_DIR/conf.tar.gz" ]; then
   fail "Config from file was not used."
 fi
 pass "Config from file was used."

-if [ ! -f "$LOCAL_DIR/other.tar.gz" ]; then
+if [ ! -f "$LOCAL_DIR/override-other.tar.gz" ]; then
   fail "Run on same schedule did not succeed."
 fi
 pass "Run on same schedule succeeded."
test/docker-compose.yml (Normal file, 30 lines)
@@ -0,0 +1,30 @@
services:
  manager: &node
    hostname: manager
    privileged: true
    image: offen/docker-volume-backup:test-sandbox
    healthcheck:
      test: ["CMD", "docker", "info"]
      interval: 1s
      timeout: 5s
      retries: 50
    volumes:
      - ./:/code
      - ${TARBALL:-.}:/cache/image.tar.gz
      - docker_volume_backup_test_sandbox_image:/var/lib/docker/image
      - docker_volume_backup_test_sandbox_overlay2:/var/lib/docker/overlay2

  worker1:
    <<: *node
    hostname: worker1
    profiles:
      - multinode

  worker2:
    <<: *node
    hostname: worker2
    profiles:
      - multinode

volumes:
  docker_volume_backup_test_sandbox_image:
  docker_volume_backup_test_sandbox_overlay2:
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   openapi_mock:
     image: muonsoft/openapi-mock:0.3.9
@@ -44,7 +42,7 @@ services:
       DROPBOX_CONCURRENCY_LEVEL: 6
     volumes:
       - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

   offen:
     image: offen/offen:latest
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -13,7 +11,7 @@ services:
     volumes:
       - ${LOCAL_DIR:-local}:/local
       - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

   offen:
     image: offen/offen:latest
test/googledrive/credentials.json (Normal file, 12 lines)
@@ -0,0 +1,12 @@
{
  "type": "service_account",
  "project_id": "dummy-project",
  "private_key_id": "dummykeyid",
  "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCus0CDXvrHhl6a\nLBj7onfU3vRExQQAPstSovS4x3/3BLJNbdMUjrxWnmV5I+Y/U1iw18+8I87CMJDA\n+rIG37tSQ6WYhj2d9ym31O2EgVDQJMkVack/rdXCoWYWn6o7dZcv4K5MEtwW8uWQ\n5PEw0wbK7NIHSSotB9RajzHnLFkSu2XcEThlOp+wkfpTCYGg6+uCBJcMwUBR45eJ\nBLcvifBJVpWaAdj7DcYqWSxRQxensqB5wzCTatwwxDZo3KxnXsf2XRU+C3B71e5q\nb26XTkuIe9W04pj9Fp3fM7RgPSJpElMRFnPUliRhkyppspfYJBYQlpdzDdqKGkGK\nLMDu2c8DAgMBAAECggEAARG8QQ+HJqWNF4VSKCXPO0+C8RtD/IULCNX3NhJzTO4c\nI3ezrp9mlGsUWvPAPAarHmYbgBJtU2I+EZsmse4TaWhcIyVnMm+Dpy1ECucpZoeU\nqIgWe90iW9daBiC3NtRXIlSQNVGjM0mpX8olZM924am6o5/wNh2CP+hsRayBAkqf\nZojppQxYnI+WNNqOlke0T8FoWWm1ZX1gHAJQAeiLpDG675lckP5WxK0RmmKOW/UM\nFU/D4+csMG3eJPhT/Qm3LyAB+pNGpfzHuQXD5jubUhUq2uSsH4ko23wSl0nGHXRW\nX3YhlMDbK4bZtG7YNHQTmh05l6HvEQVbxgHTQLN9gQKBgQDTDDlBQEkLLCWyjmja\nTNt6308CZWZIrWMVtlrpY7S0a6NKm0YGhnXsDGRY4UCNqfMv7xmIw0efN4x90JoX\nglOVeODWgCJHqt6Zzsl8zbEOgbBEvcUO0dMa5PdpMzqd2Y2WghDH1PcrXueMVNXO\nUdf7Rs157LXx5+NouzfGZVmBwQKBgQDT6RwjWV04cxXsCg3QJ06q6YsVeoAawtQE\nWLQ13e0Soa2sBH5TbuOkEQIXVRAVeGSlPfL7N5FsSiZz+ozIhRdTTgNAHqF/TJCf\nEuLEb32Sfw/krLon0LoHBf6GgP+lWqvG4K2YCoAJwBlyHKoQuvbxGer7quuQ29V1\nDqmRL8g5wwKBgQDC0UjU/BOxVYpi/mS6BzKfhR35F0NJGY0a0N+xDBIWbjopN5Z3\nlY2rXXEQPraJTvWnLO8EOUeXKP7ucS6dPvgLRa8/Mr7yK0Aa+TEznOixfHQLsKYE\nXRqje/MLUHfumJHD+sKkxOl5Rr015GYNc62NTjmFMEZwTN+2oQQGhy4NwQKBgBrA\n6W6FD8Hatb/RHSFUdRga2BZkGtxGEKJj2IycchvSEa0P/CroaxEBnLP5Z0hupLY/\n9fdFcrSrP+OQlEmUk/dOeBaWR2lc7z1GEx8dvErMg+Mo82+naHUOiq3Mh3oG0n0P\nTJtPaA7TE+NWPxpRoG+cCBCx6X+mYXKf4USVNcAlAoGBAMH2a8qlnU/lrXSNGcrd\na2TNVi2qDfy0fU6IVFGEydmLMB3wuUUCUcBS6n1d62FqdJY9Rf1wKVIeZgtqJbCv\nOculz64WaXP8TSVrXnqfW8rUsYSTIdV+/P8gxJ9gYGS8E8KZSW5a8yRDc0jcKGI6\nzUJ8tz0Q5jEWC4MdDm7G1XrG\n-----END PRIVATE KEY-----\n",
  "client_email": "dummy@dummy-project.iam.gserviceaccount.com",
  "client_id": "dummyclientid",
  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
  "token_uri": "https://oauth2.googleapis.com/token",
  "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
  "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/dummy%40dummy-project.iam.gserviceaccount.com"
}
test/googledrive/docker-compose.yml (Normal file, 52 lines)
@@ -0,0 +1,52 @@
services:
  openapi_mock:
    image: muonsoft/openapi-mock:0.3.9
    environment:
      OPENAPI_MOCK_USE_EXAMPLES: if_present
      OPENAPI_MOCK_SPECIFICATION_URL: '/etc/openapi/googledrive_v3.yaml'
    ports:
      - 8080:8080
    volumes:
      - ${SPEC_FILE:-./googledrive_v3.yaml}:/etc/openapi/googledrive_v3.yaml

  oauth2_mock:
    image: ghcr.io/navikt/mock-oauth2-server:1.0.0
    ports:
      - 8090:8090
    environment:
      PORT: 8090
      JSON_CONFIG_PATH: '/etc/oauth2/config.json'
    volumes:
      - ./oauth2_config.json:/etc/oauth2/config.json

  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    hostname: hostnametoken
    depends_on:
      - openapi_mock
      - oauth2_mock
    restart: always
    environment:
      BACKUP_FILENAME_EXPAND: 'true'
      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      GOOGLE_DRIVE_ENDPOINT: http://openapi_mock:8080
      GOOGLE_DRIVE_TOKEN_URL: http://oauth2_mock:8090/issuer1/token
      GOOGLE_DRIVE_CREDENTIALS_JSON_FILE: /etc/gdrive/credentials.json
      GOOGLE_DRIVE_FOLDER_ID: "root"
    volumes:
      - app_data:/backup/app_data:ro
      - ./credentials.json:/etc/gdrive/credentials.json

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
test/googledrive/googledrive_v3.yaml (Normal file, 139 lines)
@@ -0,0 +1,139 @@
openapi: 3.0.1
info:
  title: Minimal Google Drive API Mock
  version: 1.0.0
  description: Minimal mock implementation of Google Drive API v3 for testing
servers:
  - url: /
paths:
  /upload/drive/v3/files:
    post:
      summary: Upload file to Google Drive
      parameters:
        - name: uploadType
          in: query
          schema:
            type: string
        - name: fields
          in: query
          schema:
            type: string
        - name: supportsAllDrives
          in: query
          schema:
            type: boolean
        - name: alt
          in: query
          schema:
            type: string
        - name: prettyPrint
          in: query
          schema:
            type: boolean
      requestBody:
        content:
          multipart/related:
            schema:
              type: string
              format: binary
      responses:
        '200':
          description: File uploaded successfully
          content:
            application/json:
              schema:
                type: object
                properties:
                  id:
                    type: string
                    description: "The ID of the file"
                  name:
                    type: string
                    description: "The name of the file (extracted from request.metadata.name)"
                  mimeType:
                    type: string
                    description: "The MIME type of the file"
                  size:
                    type: string
                    description: "The size of the file in bytes"
              examples:
                UploadSuccess:
                  summary: "Response when file is uploaded successfully"
                  description: "The response includes the filename from the request metadata"
                  value:
                    id: "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms"
                    name: "test-backup.tar.gz"
                    mimeType: "application/gzip"
  /files:
    get:
      summary: List files in Google Drive
      parameters:
        - name: q
          in: query
          schema:
            type: string
            description: "A query for filtering the file results"
        - name: fields
          in: query
          schema:
            type: string
        - name: supportsAllDrives
          in: query
          schema:
            type: boolean
        - name: includeItemsFromAllDrives
          in: query
          schema:
            type: boolean
      responses:
        '200':
          description: Files listed successfully
          content:
            application/json:
              schema:
                type: object
                properties:
                  files:
                    type: array
                    items:
                      type: object
                      properties:
                        id:
                          type: string
                          description: "The ID of the file"
                        name:
                          type: string
                          description: "The name of the file"
                        mimeType:
                          type: string
                          description: "The MIME type of the file"
                        createdTime:
                          type: string
                          description: "The time the file was created"
              examples:
                FilesList:
                  value:
                    files:
                      - id: "1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms"
                        name: "test-hostnametoken.tar.gz"
                        createdTime: "CREATED_TIME_1"
                      - id: "jgmUUqptlbs74OgvE2upms1BxiMVs0XRA5nFMdKvBdBZ"
                        name: "test-hostnametoken-old.tar.gz"
                        createdTime: "CREATED_TIME_2"

  /files/{fileId}:
    delete:
      summary: Delete a file from Google Drive
      parameters:
        - name: fileId
          in: path
          required: true
          schema:
            type: string
        - name: supportsAllDrives
          in: query
          schema:
            type: boolean
      responses:
        '204':
          description: File deleted successfully
test/googledrive/oauth2_config.json (Normal file, 37 lines)
@@ -0,0 +1,37 @@
{
  "interactiveLogin": true,
  "httpServer": "NettyWrapper",
  "tokenCallbacks": [
    {
      "issuerId": "issuer1",
      "tokenExpiry": 120,
      "requestMappings": [
        {
          "requestParam": "scope",
          "match": "scope1",
          "claims": {
            "sub": "subByScope",
            "aud": [
              "audByScope"
            ]
          }
        }
      ]
    },
    {
      "issuerId": "issuer2",
      "requestMappings": [
        {
          "requestParam": "someparam",
          "match": "somevalue",
          "claims": {
            "sub": "subBySomeParam",
            "aud": [
              "audBySomeParam"
            ]
          }
        }
      ]
    }
  ]
}
test/googledrive/run.sh (Executable file, 59 lines)
@@ -0,0 +1,59 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

export SPEC_FILE=$(mktemp -d)/googledrive_v3.yaml
cp googledrive_v3.yaml $SPEC_FILE
sed -i 's/CREATED_TIME_1/'"$(date "+%Y-%m-%dT%H:%M:%SZ")/g" $SPEC_FILE
sed -i 's/CREATED_TIME_2/'"$(date "+%Y-%m-%dT%H:%M:%SZ" -d "14 days ago")/g" $SPEC_FILE

docker compose up -d --quiet-pull
sleep 5

logs=$(docker compose exec backup backup | tee /dev/stderr)

sleep 5

expect_running_containers "4"

if echo "$logs" | grep -q "ERROR"; then
  fail "Backup failed, check logs for error"
else
  pass "Backup succeeded, no errors reported."
fi

# The second part of this test checks that backups do not get deleted when the
# retention is set to 0 days (doing so would mean all backups get deleted)
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5

logs=$(docker compose exec -T backup backup | tee /dev/stderr)

if echo "$logs" | grep -q "Refusing to do so, please check your configuration"; then
  pass "Remote backups have not been deleted."
else
  fail "Remote backups would have been deleted: $logs"
fi

# The third part of this test checks that old backups do get deleted when the
# retention is set to 7 days
BACKUP_RETENTION_DAYS="7" docker compose up -d
sleep 5

info "Create second backup and prune"

logs=$(docker compose exec -T backup backup | tee /dev/stderr)

if echo "$logs" | grep -q "Pruned 1 out of 2 backups as they were older"; then
  pass "Old remote backup has been pruned, new one is still present."
elif echo "$logs" | grep -q "ERROR"; then
  fail "Pruning failed, errors reported: $logs"
elif echo "$logs" | grep -q "None of 1 existing backups were pruned"; then
  fail "Pruning failed, old backup has not been pruned: $logs"
else
  fail "Pruning failed, unknown result: $logs"
fi
test/gpg-asym/docker-compose.yml (Normal file, 25 lines)
@@ -0,0 +1,25 @@
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_FILENAME: test.tar.gz
      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      GPG_PUBLIC_KEY_RING_FILE: /keys/public_key.asc
    volumes:
      - ${KEY_DIR:-.}/public_key.asc:/keys/public_key.asc
      - ${LOCAL_DIR:-./local}:/archive
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
test/gpg-asym/run.sh (Executable file, 49 lines)
@@ -0,0 +1,49 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

export LOCAL_DIR=$(mktemp -d)

export KEY_DIR=$(mktemp -d)

export PASSPHRASE="test"

gpg --batch --gen-key <<EOF
Key-Type: RSA
Key-Length: 4096
Name-Real: offen
Name-Email: docker-volume-backup@local
Expire-Date: 0
Passphrase: $PASSPHRASE
%commit
EOF

gpg --export --armor --batch --yes --pinentry-mode loopback --passphrase $PASSPHRASE --output $KEY_DIR/public_key.asc

docker compose up -d --quiet-pull
sleep 5

docker compose exec backup backup

expect_running_containers "2"

TMP_DIR=$(mktemp -d)

gpg -d --pinentry-mode loopback --yes --passphrase $PASSPHRASE "$LOCAL_DIR/test.tar.gz.gpg" > "$LOCAL_DIR/decrypted.tar.gz"

tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C $TMP_DIR

if [ ! -f $TMP_DIR/backup/app_data/offen.db ]; then
  fail "Could not find expected file in untarred archive."
fi
rm "$LOCAL_DIR/decrypted.tar.gz"

pass "Found relevant files in decrypted and untarred local backup."

if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.gpg" ]; then
  fail "Could not find local symlink to latest encrypted backup."
fi
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -13,7 +11,7 @@ services:
     volumes:
       - ${LOCAL_DIR:-./local}:/archive
       - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

   offen:
     image: offen/offen:latest
@@ -1,5 +1,3 @@
-version: '3.8'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -15,7 +13,7 @@ services:
       BACKUP_PRUNING_PREFIX: test
     volumes:
       - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro
       - ${LOCAL_DIR:-./local}:/archive

   offen:
Some files were not shown because too many files have changed in this diff.