Mirror of https://github.com/offen/docker-volume-backup.git (synced 2025-12-05 09:08:02 +01:00)

Compare commits — 120 commits
Commits included in this comparison (abbreviated hashes):

89655e09ad, 016e470f5f, 0f30b959f8, eb4099debd, d8ac5ae7e6, bad2d98ac8,
2884d89f47, fcdaa09538, c48ac28626, 22a4346c06, 41d518a341, 8f0a1c9809,
75f94b0211, 56f325a8bd, 7e6ed752f7, 00cf059f4f, cbbaa6ba7a, 2652e05169,
23756074f9, 40b12b9d07, e628f09122, 7340e00dab, 958585336a, 68b7e4d678,
857e4fc605, 8d26194809, 3063288d1e, 02fdfb363c, 2ee23a9384, 16be0c0217,
4799795f0a, 49b8d2f8d8, e4beef200a, e75ab8bdd8, a4145352f9, 615256cda9,
40c4f11d70, 2685571c58, 04ad0777e0, 50e41eac02, 94e59a102e, 964a5e0342,
2363c3c9cb, 6bc66db833, de40eae4de, 731421e359, d46918b13a, 2fb63059b3,
e0fcbd27e5, f4884bf190, 52787a1e42, 6e08ae7c39, 0183db831b, f481fda848,
f4cf4173e6, 681983608f, 45335ffb67, 01a595607d, 119391e8df, dd5f7f5b66,
c54a5bef5f, 8fac9608ff, 3ee40b6422, 8b5c9a494f, 44ad3bbda2, 74e065cbb9,
8a64da4b0b, f97ce11734, 336e12f874, 016c6c8307, e22f317fbb, e04bd2f066,
c4eeaad813, 5840f1c5dc, d71b7304c2, fbc7f85d9f, 2af5bdf4d9, 631ca3e07d,
3d35d7c00e, 954bde73fb, ab46e96706, ab4ce94534, e4170addb6, b8410bbdc5,
24e1341589, 3d0286472b, bb11ae035b, 9209037ed9, 2e73dea4f7, 7dc3ae17e7,
9d5ea718a0, 272495ae7d, 8beb28d4f8, 0ec2e68076, b85afa6008, 4cb47a4818,
9b5ba8958d, 0327701e2d, 58f26ba004, f62ef6e05a, 40924434e4, e613f6046f,
292d47eb19, 7637975e3f, c47a14c53a, 9f795761d6, f2ef48803c, 8b69566291,
bf79c913e0, 2f7193aa9b, 550c4f520f, 1af472077c, a077f12c11, cb5a38a1b7,
b8995dbc51, baf34ec1f7, e8562b1785, 5d7451410b, 440bcf76ce, 2d3e79cf5e
.github/FUNDING.yml (vendored, 3 lines changed)
@@ -1,3 +0,0 @@
-github: offen
-patreon: offen
-
.github/workflows/deploy-docs.yml (vendored, 4 lines changed)
@@ -39,7 +39,7 @@ jobs:
       env:
         JEKYLL_ENV: production
     - name: Upload artifact
-      uses: actions/upload-pages-artifact@v1
+      uses: actions/upload-pages-artifact@v3
       with:
         path: 'docs/_site/'
@@ -52,4 +52,4 @@ jobs:
     steps:
     - name: Deploy to GitHub Pages
       id: deployment
-      uses: actions/deploy-pages@v1
+      uses: actions/deploy-pages@v4
.github/workflows/golangci-lint.yml (vendored, 4 lines changed)
@@ -18,7 +18,7 @@ jobs:
     - uses: actions/checkout@v4
     - uses: actions/setup-go@v5
       with:
-        go-version: '1.22'
+        go-version: '1.24'
         cache: false
     - name: golangci-lint
       uses: golangci/golangci-lint-action@v3
@@ -26,7 +26,7 @@
         # Require: The version of golangci-lint to use.
         # When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version.
         # When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit.
-        version: v1.54
+        version: v1.64

         # Optional: working directory, useful for monorepos
         # working-directory: somedir
.github/workflows/unit.yml (vendored, 2 lines changed)
@@ -14,7 +14,7 @@ jobs:
     - name: Setup Go
       uses: actions/setup-go@v4
       with:
-        go-version: '1.22.x'
+        go-version: '1.24.x'
     - name: Install dependencies
       run: go mod download
     - name: Test with the Go CLI
@@ -5,4 +5,5 @@ linters:
   - staticcheck
   - govet
 output:
-  format: github-actions
+  formats:
+    - format: colored-line-number
@@ -1,7 +1,7 @@
-# Copyright 2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2022 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0

-FROM golang:1.22-alpine as builder
+FROM golang:1.24-alpine AS builder

 WORKDIR /app
 COPY . .
@@ -9,7 +9,7 @@ RUN go mod download
 WORKDIR /app/cmd/backup
 RUN go build -o backup .

-FROM alpine:3.19
+FROM alpine:3.21

 WORKDIR /root
@@ -76,4 +76,9 @@ docker run --rm \
   offen/docker-volume-backup:v2
 ```

-Alternatively, pass a `--env-file` in order to use a full config as described below.
+Alternatively, pass a `--env-file` in order to use a full config as described [in the docs](https://offen.github.io/docker-volume-backup/reference/).
+
+---
+
+Copyright © 2024 <a target="_blank" href="https://www.offen.software">offen.software</a> and contributors.
+Distributed under the <a href="https://github.com/offen/docker-volume-backup/tree/main/LICENSE">MPL-2.0 License</a>.
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 // Portions of this file are taken from package `targz`, Copyright (c) 2014 Fredrik Wallgren
@@ -22,8 +22,7 @@ import (
 )

 func createArchive(files []string, inputFilePath, outputFilePath string, compression string, compressionConcurrency int) error {
-	inputFilePath = stripTrailingSlashes(inputFilePath)
-	inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
+	_, outputFilePath, err := makeAbsolute(stripTrailingSlashes(inputFilePath), outputFilePath)
 	if err != nil {
 		return errwrap.Wrap(err, "error transposing given file paths")
 	}
@@ -31,7 +30,7 @@ func createArchive(files []string, inputFilePath, outputFilePath string, compres
 		return errwrap.Wrap(err, "error creating output file path")
 	}

-	if err := compress(files, outputFilePath, filepath.Dir(inputFilePath), compression, compressionConcurrency); err != nil {
+	if err := compress(files, outputFilePath, compression, compressionConcurrency); err != nil {
 		return errwrap.Wrap(err, "error creating archive")
 	}

@@ -55,7 +54,7 @@ func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error)
 	return inputFilePath, outputFilePath, err
 }

-func compress(paths []string, outFilePath, subPath string, algo string, concurrency int) error {
+func compress(paths []string, outFilePath, algo string, concurrency int) error {
 	file, err := os.Create(outFilePath)
 	if err != nil {
 		return errwrap.Wrap(err, "error creating out file")
@@ -94,6 +93,8 @@ func compress(paths []string, outFilePath, subPath string, algo string, concurre

 func getCompressionWriter(file *os.File, algo string, concurrency int) (io.WriteCloser, error) {
 	switch algo {
+	case "none":
+		return &passThroughWriteCloser{file}, nil
 	case "gz":
 		w, err := pgzip.NewWriterLevel(file, 5)
 		if err != nil {
@@ -166,3 +167,15 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {

 	return nil
 }
+
+type passThroughWriteCloser struct {
+	target io.WriteCloser
+}
+
+func (p *passThroughWriteCloser) Write(b []byte) (int, error) {
+	return p.target.Write(b)
+}
+
+func (p *passThroughWriteCloser) Close() error {
+	return nil
+}
@@ -1,4 +1,4 @@
-// Copyright 2024 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -131,13 +131,9 @@ func (c *command) schedule(strategy configStrategy) error {
 			c.logger.Warn(
 				fmt.Sprintf("Scheduled cron expression %s will never run, is this intentional?", config.BackupCronExpression),
 			)
-
-			if err != nil {
-				return errwrap.Wrap(err, "error scheduling")
-			}
 			c.schedules = append(c.schedules, id)
 		}
 	}

 	return nil
 }
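The hunk above keeps a warning for cron expressions that can never fire. As a standalone illustration of how such a check can be implemented — a hedged sketch assuming the widely used robfig/cron/v3 package and a hypothetical `neverRuns` helper, not code from this repository:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

// neverRuns reports whether a cron expression has no upcoming activation.
// robfig/cron's Schedule.Next returns the zero time when it cannot find
// an activation within its multi-year search horizon.
func neverRuns(expr string) (bool, error) {
	sched, err := cron.ParseStandard(expr)
	if err != nil {
		return false, err
	}
	return sched.Next(time.Now()).IsZero(), nil
}

func main() {
	// "0 5 31 2 *" asks for February 31st, which never exists.
	never, err := neverRuns("0 5 31 2 *")
	if err != nil {
		panic(err)
	}
	fmt.Println(never) // prints: true
}
```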
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -47,6 +47,9 @@ type Config struct {
 	BackupExcludeRegexp RegexpDecoder `split_words:"true"`
 	BackupSkipBackendsFromPrune []string `split_words:"true"`
 	GpgPassphrase string `split_words:"true"`
+	GpgPublicKeyRing string `split_words:"true"`
+	AgePassphrase string `split_words:"true"`
+	AgePublicKeys []string `split_words:"true"`
 	NotificationURLs []string `envconfig:"NOTIFICATION_URLS"`
 	NotificationLevel string `split_words:"true" default:"error"`
 	EmailNotificationRecipient string `split_words:"true"`
@@ -72,9 +75,11 @@ type Config struct {
 	LockTimeout time.Duration `split_words:"true" default:"60m"`
 	AzureStorageAccountName string `split_words:"true"`
 	AzureStoragePrimaryAccountKey string `split_words:"true"`
+	AzureStorageConnectionString string `split_words:"true"`
 	AzureStorageContainerName string `split_words:"true"`
 	AzureStoragePath string `split_words:"true"`
 	AzureStorageEndpoint string `split_words:"true" default:"https://{{ .AccountName }}.blob.core.windows.net/"`
+	AzureStorageAccessTier string `split_words:"true"`
 	DropboxEndpoint string `split_words:"true" default:"https://api.dropbox.com/"`
 	DropboxOAuth2Endpoint string `envconfig:"DROPBOX_OAUTH2_ENDPOINT" default:"https://api.dropbox.com/"`
 	DropboxRefreshToken string `split_words:"true"`
@@ -90,7 +95,7 @@ type CompressionType string

 func (c *CompressionType) Decode(v string) error {
 	switch v {
-	case "gz", "zst":
+	case "none", "gz", "zst":
 		*c = CompressionType(v)
 		return nil
 	default:
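The `Decode` method above is what lets `BACKUP_COMPRESSION` reject unknown values at startup. The struct tags shown (`split_words`, `envconfig`, `default`) are the tag syntax of the kelseyhightower/envconfig package, which calls `Decode` on any field type implementing it. A minimal standalone sketch of the same pattern, under that assumption and with hypothetical names:

```go
package main

import (
	"fmt"
	"os"

	"github.com/kelseyhightower/envconfig"
)

// CompressionType mirrors the custom decoder pattern from the diff above:
// invalid values fail configuration parsing instead of surfacing later.
type CompressionType string

func (c *CompressionType) Decode(v string) error {
	switch v {
	case "none", "gz", "zst":
		*c = CompressionType(v)
		return nil
	default:
		return fmt.Errorf("unsupported compression type: %q", v)
	}
}

type Config struct {
	BackupCompression CompressionType `split_words:"true" default:"gz"`
}

func main() {
	os.Setenv("BACKUP_COMPRESSION", "zst")
	var cfg Config
	// envconfig derives BACKUP_COMPRESSION from the field name via split_words.
	if err := envconfig.Process("", &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.BackupCompression) // prints: zst
}
```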
@@ -1,4 +1,4 @@
-// Copyright 2024 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

@@ -1,4 +1,4 @@
-// Copyright 2024 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

@@ -1,4 +1,4 @@
-// Copyright 2024 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -1,64 +1,226 @@
-// Copyright 2024 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

 import (
 	"bytes"
 	"errors"
 	"fmt"
 	"io"
 	"os"
 	"path"
 	"strings"

 	"filippo.io/age"
 	"filippo.io/age/agessh"
 	"github.com/ProtonMail/go-crypto/openpgp/armor"
 	openpgp "github.com/ProtonMail/go-crypto/openpgp/v2"
 	"github.com/offen/docker-volume-backup/internal/errwrap"
 )

-// encryptArchive encrypts the backup file using PGP and the configured passphrase.
-// In case no passphrase is given it returns early, leaving the backup file
+func countTrue(b ...bool) int {
+	c := int(0)
+	for _, v := range b {
+		if v {
+			c++
+		}
+	}
+	return c
+}
+
+// encryptArchive encrypts the backup file using PGP and the configured passphrase or publickey(s).
+// In case no passphrase or publickey is given it returns early, leaving the backup file
 // untouched.
 func (s *script) encryptArchive() error {
-	if s.c.GpgPassphrase == "" {
+	useGPGSymmetric := s.c.GpgPassphrase != ""
+	useGPGAsymmetric := s.c.GpgPublicKeyRing != ""
+	useAgeSymmetric := s.c.AgePassphrase != ""
+	useAgeAsymmetric := len(s.c.AgePublicKeys) > 0
+	switch nconfigured := countTrue(
+		useGPGSymmetric,
+		useGPGAsymmetric,
+		useAgeSymmetric,
+		useAgeAsymmetric,
+	); nconfigured {
+	case 0:
 		return nil
+	case 1:
+		// ok!
+	default:
+		return fmt.Errorf(
+			"error in selecting archive encryption method: expected 0 or 1 to be configured, %d methods are configured",
+			nconfigured,
+		)
 	}

-	gpgFile := fmt.Sprintf("%s.gpg", s.file)
+	if useGPGSymmetric {
+		return s.encryptWithGPGSymmetric()
+	} else if useGPGAsymmetric {
+		return s.encryptWithGPGAsymmetric()
+	} else if useAgeSymmetric || useAgeAsymmetric {
+		ar, err := s.getConfiguredAgeRecipients()
+		if err != nil {
+			return errwrap.Wrap(err, "failed to get configured age recipients")
+		}
+		return s.encryptWithAge(ar)
+	}
 	return nil
 }

+func (s *script) getConfiguredAgeRecipients() ([]age.Recipient, error) {
+	if s.c.AgePassphrase == "" && len(s.c.AgePublicKeys) == 0 {
+		return nil, fmt.Errorf("no age recipients configured")
+	}
+	recipients := []age.Recipient{}
+	if len(s.c.AgePublicKeys) > 0 {
+		for _, pk := range s.c.AgePublicKeys {
+			pkr, err := parseAgeRecipient(pk)
+			if err != nil {
+				return nil, errwrap.Wrap(err, "failed to parse age public key")
+			}
+			recipients = append(recipients, pkr)
+		}
+	}
+	if s.c.AgePassphrase != "" {
+		if len(recipients) != 0 {
+			return nil, fmt.Errorf("age encryption must only be enabled via passphrase or public key, not both")
+		}
+
+		r, err := age.NewScryptRecipient(s.c.AgePassphrase)
+		if err != nil {
+			return nil, errwrap.Wrap(err, "failed to create scrypt identity from age passphrase")
+		}
+		recipients = append(recipients, r)
+	}
+	return recipients, nil
+}
+
+func parseAgeRecipient(arg string) (age.Recipient, error) {
+	// This logic is adapted from what the age CLI is doing
+	// stripping some special cases
+	switch {
+	case strings.HasPrefix(arg, "age1"):
+		return age.ParseX25519Recipient(arg)
+	case strings.HasPrefix(arg, "ssh-"):
+		return agessh.ParseRecipient(arg)
+	}
+	return nil, fmt.Errorf("unknown recipient type: %q", arg)
+}
+
+func (s *script) encryptWithAge(rec []age.Recipient) error {
+	return s.doEncrypt("age", func(ciphertextWriter io.Writer) (io.WriteCloser, error) {
+		return age.Encrypt(ciphertextWriter, rec...)
+	})
+}
+
+func (s *script) encryptWithGPGSymmetric() error {
+	return s.doEncrypt("gpg", func(ciphertextWriter io.Writer) (io.WriteCloser, error) {
+		_, name := path.Split(s.file)
+		return openpgp.SymmetricallyEncrypt(ciphertextWriter, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
+			FileName: name,
+		}, nil)
+	})
+}
+
+type closeAllWriter struct {
+	io.Writer
+	closers []io.Closer
+}
+
+func (c *closeAllWriter) Close() (err error) {
+	for _, cl := range c.closers {
+		err = errors.Join(err, cl.Close())
+	}
+	return
+}
+
+var _ io.WriteCloser = (*closeAllWriter)(nil)
+
+func (s *script) encryptWithGPGAsymmetric() error {
+	return s.doEncrypt("gpg", func(ciphertextWriter io.Writer) (_ io.WriteCloser, outerr error) {
+		entityList, err := openpgp.ReadArmoredKeyRing(bytes.NewReader([]byte(s.c.GpgPublicKeyRing)))
+		if err != nil {
+			return nil, errwrap.Wrap(err, "error parsing armored keyring")
+		}
+
+		armoredWriter, err := armor.Encode(ciphertextWriter, "PGP MESSAGE", nil)
+		if err != nil {
+			return nil, errwrap.Wrap(err, "error preparing encryption")
+		}
+		defer func() {
+			if outerr != nil {
+				_ = armoredWriter.Close()
+			}
+		}()
+
+		_, name := path.Split(s.file)
+		encWriter, err := openpgp.Encrypt(armoredWriter, entityList, nil, nil, &openpgp.FileHints{
+			FileName: name,
+		}, nil)
+		if err != nil {
+			return nil, err
+		}
+		return &closeAllWriter{
+			Writer: encWriter,
+			closers: []io.Closer{encWriter, armoredWriter},
+		}, nil
+	})
+}
+
+func (s *script) doEncrypt(
+	extension string,
+	encryptor func(ciphertextWriter io.Writer) (io.WriteCloser, error),
+) (outerr error) {
+	encFile := fmt.Sprintf("%s.%s", s.file, extension)
 	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := remove(gpgFile); err != nil {
-			return errwrap.Wrap(err, "error removing gpg file")
+		if err := remove(encFile); err != nil {
+			return errwrap.Wrap(err, "error removing encrypted file")
 		}
 		s.logger.Info(
-			fmt.Sprintf("Removed GPG file `%s`.", gpgFile),
+			fmt.Sprintf("Removed encrypted file `%s`.", encFile),
 		)
 		return nil
 	})

-	outFile, err := os.Create(gpgFile)
+	outFile, err := os.Create(encFile)
 	if err != nil {
 		return errwrap.Wrap(err, "error opening out file")
 	}
-	defer outFile.Close()
+	defer func() {
+		if err := outFile.Close(); err != nil {
+			outerr = errors.Join(outerr, errwrap.Wrap(err, "error closing out file"))
+		}
+	}()

-	_, name := path.Split(s.file)
-	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
-		FileName: name,
-	}, nil)
+	dst, err := encryptor(outFile)
 	if err != nil {
 		return errwrap.Wrap(err, "error encrypting backup file")
 	}
-	defer dst.Close()
+	defer func() {
+		if err := dst.Close(); err != nil {
+			outerr = errors.Join(outerr, errwrap.Wrap(err, "error closing encrypted backup file"))
+		}
+	}()

 	src, err := os.Open(s.file)
 	if err != nil {
-		return errwrap.Wrap(err, fmt.Sprintf("error opening backup file `%s`", s.file))
+		return errwrap.Wrap(err, fmt.Sprintf("error opening backup file %q", s.file))
 	}
+	defer func() {
+		if err := src.Close(); err != nil {
+			outerr = errors.Join(outerr, errwrap.Wrap(err, "error closing backup file"))
+		}
+	}()

 	if _, err := io.Copy(dst, src); err != nil {
 		return errwrap.Wrap(err, "error writing ciphertext to file")
 	}

-	s.file = gpgFile
+	s.file = encFile
 	s.logger.Info(
-		fmt.Sprintf("Encrypted backup using given passphrase, saving as `%s`.", s.file),
+		fmt.Sprintf("Encrypted backup using %q, saving as %q", extension, s.file),
 	)
-	return nil
+	return
 }
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 // Portions of this file are taken and adapted from `moby`, Copyright 2012-2017 Docker, Inc.
@@ -16,7 +16,7 @@ import (
 	"strings"

 	"github.com/cosiner/argv"
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/offen/docker-volume-backup/internal/errwrap"
@@ -24,11 +24,19 @@
 )

 func (s *script) exec(containerRef string, command string, user string) ([]byte, []byte, error) {
-	args, _ := argv.Argv(command, nil, nil)
+	args, err := argv.Argv(command, nil, nil)
+	if err != nil {
+		return nil, nil, errwrap.Wrap(err, fmt.Sprintf("error parsing argv from '%s'", command))
+	}
+	if len(args) == 0 {
+		return nil, nil, errwrap.Wrap(nil, "received unexpected empty command")
+	}
+
 	commandEnv := []string{
 		fmt.Sprintf("COMMAND_RUNTIME_ARCHIVE_FILEPATH=%s", s.file),
 	}
-	execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, types.ExecConfig{
+
+	execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, container.ExecOptions{
 		Cmd: args[0],
 		AttachStdin: true,
 		AttachStderr: true,
@@ -39,21 +47,29 @@ func (s *script) exec(containerRef string, command string, user string) ([]byte,
 		return nil, nil, errwrap.Wrap(err, "error creating container exec")
 	}

-	resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, types.ExecStartCheck{})
+	resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, container.ExecStartOptions{})
 	if err != nil {
 		return nil, nil, errwrap.Wrap(err, "error attaching container exec")
 	}
 	defer resp.Close()

-	var outBuf, errBuf bytes.Buffer
+	var outBuf, errBuf, fullRespBuf bytes.Buffer
 	outputDone := make(chan error)

+	tee := io.TeeReader(resp.Reader, &fullRespBuf)
+
 	go func() {
-		_, err := stdcopy.StdCopy(&outBuf, &errBuf, resp.Reader)
+		_, err := stdcopy.StdCopy(&outBuf, &errBuf, tee)
 		outputDone <- err
 	}()

 	if err := <-outputDone; err != nil {
+		if body, bErr := io.ReadAll(&fullRespBuf); bErr == nil {
+			// if possible, try to append the exec output to the error
+			// as it's likely to be more relevant for users than the error from
+			// calling stdcopy.Copy
+			err = errwrap.Wrap(errors.New(string(body)), err.Error())
+		}
 		return nil, nil, errwrap.Wrap(err, "error demultiplexing output")
 	}

@@ -88,7 +104,7 @@ func (s *script) runLabeledCommands(label string) error {
 			Value: fmt.Sprintf("docker-volume-backup.exec-label=%s", s.c.ExecLabel),
 		})
 	}
-	containersWithCommand, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+	containersWithCommand, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 		Filters: filters.NewArgs(f...),
 	})
 	if err != nil {
@@ -101,7 +117,7 @@ func (s *script) runLabeledCommands(label string) error {
 			Key: "label",
 			Value: "docker-volume-backup.exec-pre",
 		}
-		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+		deprecatedContainers, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 			Filters: filters.NewArgs(f...),
 		})
 		if err != nil {
@@ -118,7 +134,7 @@ func (s *script) runLabeledCommands(label string) error {
 			Key: "label",
 			Value: "docker-volume-backup.exec-post",
 		}
-		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+		deprecatedContainers, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 			Filters: filters.NewArgs(f...),
 		})
 		if err != nil {
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

@@ -1,4 +1,4 @@
-// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2021-2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

@@ -1,4 +1,4 @@
-// Copyright 2024 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

@@ -1,4 +1,4 @@
-// Copyright 2024 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

@@ -1,4 +1,4 @@
-// Copyright 2024 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -6,6 +6,7 @@ package main
 import (
 	"errors"
 	"fmt"
+	"runtime/debug"

 	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
@@ -17,6 +18,7 @@
 func runScript(c *Config) (err error) {
 	defer func() {
 		if derr := recover(); derr != nil {
+			fmt.Printf("%s: %s\n", derr, debug.Stack())
 			asErr, ok := derr.(error)
 			if ok {
 				err = errwrap.Wrap(asErr, "unexpected panic running script")
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -86,7 +86,12 @@ func (s *script) init() error {

 	var bf bytes.Buffer
 	if tErr := tmplFileName.Execute(&bf, map[string]string{
-		"Extension": fmt.Sprintf("tar.%s", s.c.BackupCompression),
+		"Extension": func() string {
+			if s.c.BackupCompression == "none" {
+				return "tar"
+			}
+			return fmt.Sprintf("tar.%s", s.c.BackupCompression)
+		}(),
 	}); tErr != nil {
 		return errwrap.Wrap(tErr, "error executing backup file extension template")
 	}
@@ -193,6 +198,8 @@ func (s *script) init() error {
 		PrimaryAccountKey: s.c.AzureStoragePrimaryAccountKey,
 		Endpoint: s.c.AzureStorageEndpoint,
 		RemotePath: s.c.AzureStoragePath,
+		ConnectionString: s.c.AzureStorageConnectionString,
+		AccessTier: s.c.AzureStorageAccessTier,
 	}
 	azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
 	if err != nil {
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -1,4 +1,4 @@
-// Copyright 2024 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -14,9 +14,11 @@ import (

 	"github.com/docker/cli/cli/command/service/progress"
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	ctr "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/api/types/system"
 	"github.com/docker/docker/client"
 	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
@@ -65,7 +67,7 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i
 				),
 			)
 		case <-poll.C:
-			containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
+			containers, err := cli.ContainerList(context.Background(), container.ListOptions{
 				Filters: filters.NewArgs(filters.KeyValuePair{
 					Key: "label",
 					Value: fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID),
@@ -82,7 +84,7 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i
 }

 func isSwarm(c interface {
-	Info(context.Context) (types.Info, error)
+	Info(context.Context) (system.Info, error)
 }) (bool, error) {
 	info, err := c.Info(context.Background())
 	if err != nil {
@@ -123,11 +125,11 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 		labelValue,
 	)

-	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
+	allContainers, err := s.cli.ContainerList(context.Background(), container.ListOptions{})
 	if err != nil {
 		return noop, errwrap.Wrap(err, "error querying for containers")
 	}
-	containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+	containersToStop, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 		Filters: filters.NewArgs(filters.KeyValuePair{
 			Key: "label",
 			Value: filterMatchLabel,
@@ -151,15 +153,21 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 			}),
 			Status: true,
 		})
+		if err != nil {
+			return noop, errwrap.Wrap(err, "error querying for services to scale down")
+		}
 		for _, s := range matchingServices {
+			if s.Spec.Mode.Replicated == nil {
+				return noop, errwrap.Wrap(
+					nil,
+					fmt.Sprintf("only replicated services can be restarted, but found a label on service %s", s.Spec.Name),
+				)
+			}
 			servicesToScaleDown = append(servicesToScaleDown, handledSwarmService{
 				serviceID: s.ID,
 				initialReplicaCount: *s.Spec.Mode.Replicated.Replicas,
 			})
 		}
-		if err != nil {
-			return noop, errwrap.Wrap(err, "error querying for services to scale down")
-		}
 	}

 	if len(containersToStop) == 0 && len(servicesToScaleDown) == 0 {
@@ -303,7 +311,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 			continue
 		}

-		if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
+		if err := s.cli.ContainerStart(context.Background(), container.ID, ctr.StartOptions{}); err != nil {
 			restartErrors = append(restartErrors, err)
 		}
 	}

@@ -5,16 +5,16 @@ import (
 	"errors"
 	"testing"

-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/api/types/system"
 )

 type mockInfoClient struct {
-	result types.Info
+	result system.Info
 	err error
 }

-func (m *mockInfoClient) Info(context.Context) (types.Info, error) {
+func (m *mockInfoClient) Info(context.Context) (system.Info, error) {
 	return m.result, m.err
 }
@@ -28,7 +28,7 @@ func TestIsSwarm(t *testing.T) {
 		{
 			"swarm",
 			&mockInfoClient{
-				result: types.Info{
+				result: system.Info{
 					Swarm: swarm.Info{
 						LocalNodeState: swarm.LocalNodeStateActive,
 					},
@@ -40,7 +40,7 @@ func TestIsSwarm(t *testing.T) {
 		{
 			"compose",
 			&mockInfoClient{
-				result: types.Info{
+				result: system.Info{
 					Swarm: swarm.Info{
 						LocalNodeState: swarm.LocalNodeStateInactive,
 					},
@@ -52,7 +52,7 @@ func TestIsSwarm(t *testing.T) {
 		{
 			"balena",
 			&mockInfoClient{
-				result: types.Info{
+				result: system.Info{
 					Swarm: swarm.Info{
 						LocalNodeState: "",
 					},
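The scale-down logic above records each service's initial replica count so it can be restored after the backup. A hedged sketch of the underlying Docker SDK calls — illustrative only, with a hypothetical `scaleService` helper rather than this repository's actual implementation:

```go
package main

import (
	"context"
	"errors"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// scaleService sets the replica count of a replicated swarm service.
// Restoring the previous state later just means calling it again with
// the recorded initial replica count.
func scaleService(cli *client.Client, serviceID string, replicas uint64) error {
	svc, _, err := cli.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
	if err != nil {
		return err
	}
	if svc.Spec.Mode.Replicated == nil {
		return errors.New("only replicated services can be scaled")
	}
	svc.Spec.Mode.Replicated.Replicas = &replicas
	// ServiceUpdate requires the current service version to guard
	// against concurrent modifications.
	_, err = cli.ServiceUpdate(context.Background(), svc.ID, svc.Version, svc.Spec, types.ServiceUpdateOptions{})
	return err
}
```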
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

@@ -59,7 +59,7 @@ GEM
     rb-fsevent (0.11.2)
     rb-inotify (0.10.1)
       ffi (~> 1.0)
-    rexml (3.2.6)
+    rexml (3.3.9)
     rouge (3.30.0)
     safe_yaml (1.0.5)
     sassc (2.4.0)

@@ -30,6 +30,6 @@ nav_external_links:
     url: https://github.com/offen/docker-volume-backup

 footer_content: >-
-  Copyright © 2021 Offen Authors and contributors.
+  Copyright © 2024 <a target="_blank" href="https://www.offen.software">offen.software</a> and contributors.
   Distributed under the <a href="https://github.com/offen/docker-volume-backup/tree/main/LICENSE">MPL-2.0 License.</a><br>
   Something missing, unclear or not working? Open <a href="https://github.com/offen/docker-volume-backup/issues">an issue</a>.
@@ -3,15 +3,7 @@ title: Encrypt backups using GPG
 layout: default
 parent: How Tos
-nav_order: 7
+nav_exclude: true
 ---

 # Encrypt backups using GPG

-The image supports encrypting backups using GPG out of the box.
-In case a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.gpg` file instead.
-
-Assuming you have `gpg` installed, you can decrypt such a backup using (your OS will prompt for the passphrase before decryption can happen):
-
-```console
-gpg -o backup.tar.gz -d backup.tar.gz.gpg
-```
+See: [Encrypt Backups](encrypt-backups)
docs/how-tos/encrypt-backups.md (new file, 32 lines)
@@ -0,0 +1,32 @@
+---
+title: Encrypting backups
+layout: default
+parent: How Tos
+nav_order: 7
+---
+
+# Encrypting backups
+
+The image supports encrypting backups using one of two available methods: **GPG** or **[age](https://age-encryption.org/)**
+
+## Using GPG encryption
+
+In case a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY_RING` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.gpg` file instead.
+
+Assuming you have `gpg` installed, you can decrypt such a backup using (your OS will prompt for the passphrase before decryption can happen):
+
+```console
+gpg -o backup.tar.gz -d backup.tar.gz.gpg
+```
+
+## Using age encryption
+
+{: .note }
+Even though the `age` CLI tools supports encryption using SSH keys, this is not supported by this tool.
+`AGE_PUBLIC_KEYS` currently expects `age` keys to be given.
+
+age allows backups to be encrypted with either a symmetric key (password) or a public key. One of those options are available for use.
+
+Given `AGE_PASSPHRASE` being provided, the backup archive will be encrypted with the passphrase and saved as a `.age` file instead. Refer to age documentation for how to properly decrypt.
+
+Given `AGE_PUBLIC_KEYS` being provided (allowing multiple by separating each public key with `,`), the backup archive will be encrypted with the provided public keys. It will also result in the archive being saved as a `.age` file.
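For programmatic decryption of such a passphrase-protected `.age` archive, a minimal sketch using the filippo.io/age Go library — an illustration added here, not part of the new docs file above; the age CLI works just as well:

```go
package main

import (
	"io"
	"os"

	"filippo.io/age"
)

// decryptBackup decrypts a passphrase-protected .age backup archive.
func decryptBackup(encryptedPath, outputPath, passphrase string) error {
	in, err := os.Open(encryptedPath)
	if err != nil {
		return err
	}
	defer in.Close()

	// The scrypt identity corresponds to archives created via AGE_PASSPHRASE.
	identity, err := age.NewScryptIdentity(passphrase)
	if err != nil {
		return err
	}
	plaintext, err := age.Decrypt(in, identity)
	if err != nil {
		return err
	}

	out, err := os.Create(outputPath)
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, plaintext)
	return err
}
```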
@@ -33,7 +33,7 @@ services:
       - docker-volume-backup.copy-post=/bin/sh -c 'rsync $$COMMAND_RUNTIME_ARCHIVE_FILEPATH /destination'
     volumes:
       - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

 # other services defined here ...
 volumes:
@@ -9,6 +9,11 @@ parent: How Tos

 In certain scenarios it can be required to run specific commands before and after a backup is taken (e.g. dumping a database).
 When mounting the Docker socket into the `docker-volume-backup` container, you can define pre- and post-commands that will be run in the context of the target container (it is also possible to run commands inside the `docker-volume-backup` container itself using this feature).

+{: .important }
+In a multi-node Swarm setup, commands can currently only be run on the node the `offen/docker-volume-backup` container is running on.
+Labeled containers on other nodes are not visible to the backup command.
+
 Such commands are defined by specifying the command in a `docker-volume-backup.[step]-[pre|post]` label where `step` can be any of the following phases of a backup lifecycle:

 - `archive` (the tar archive is created)
@@ -46,6 +51,10 @@ If you have more than one `docker-volume-backup` container (possibly across seve
 multiple backup schedules, you will need to use `EXEC_LABEL` in the configuration and a `docker-volume-backup.exec-label` label on each
 container using custom commands to ensure that the commands are only run by the correct `docker-volume-backup` instance.

+{: .important }
+In case you use `EXEC_LABEL` together with configuration mounted from `conf.d` it's important to understand that a distinct `EXEC_LABEL` __should be set in each configuration__.
+Else, schedules that do not specify an `EXEC_LABEL` will still trigger commands on all containers with such labels, no matter whether they specify `docker-volume-backup.exec-label` or not.
+
 ```yml
 version: '3'
@@ -33,5 +33,7 @@ Note: Using the "Generated access token" in the app console is not supported, as

 ## Other parameters

-Important: If you chose `App folder` access during the creation of your Dropbox app in step 1 above, you can only write in the app's directory!
-This means, that `DROPBOX_REMOTE_PATH` must start with e.g. `/Apps/YOUR_APP_NAME` or `/Apps/YOUR_APP_NAME/some_sub_dir`
+Important: If you chose `App folder` access during the creation of your Dropbox app in step 1 above, `DROPBOX_REMOTE_PATH` will be a relative path under the App folder!
+(_For example, DROPBOX_REMOTE_PATH=/somedir means the backup file will be uploaded to /Apps/myapp/somedir_)
+On the other hand if you chose `Full Dropbox` access, the value for `DROPBOX_REMOTE_PATH` will represent an absolute path inside your Dropbox storage area.
+(_Still considering the same example above, the backup file will be uploaded to /somedir in your Dropbox root_)
@@ -25,7 +25,7 @@ services:
 Notification backends other than email are also supported.
 Refer to the documentation of [shoutrrr][shoutrrr-docs] to find out about options and configuration.

-[shoutrrr-docs]: https://containrrr.dev/shoutrrr/0.7/services/overview/
+[shoutrrr-docs]: https://containrrr.dev/shoutrrr/v0.8/services/overview/

 {: .note }
 If you also want notifications on successful executions, set `NOTIFICATION_LEVEL` to `info`.
@@ -88,7 +88,7 @@ docker run --rm \

 Alternatively, pass a `--env-file` in order to use a full config as described below.

-### Available image registries
+## Available image registries

 This Docker image is published to both Docker Hub and the GitHub container registry.
 Depending on your preferences and needs, you can reference both `offen/docker-volume-backup` as well as `ghcr.io/offen/docker-volume-backup`:
@@ -100,7 +100,7 @@ docker pull ghcr.io/offen/docker-volume-backup:v2

 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.

-### Supported Engines
+## Supported Engines

 This tool is developed and tested against the Docker CE engine exclusively.
 While it may work against different implementations (e.g. Balena Engine), there are no guarantees about support for non-Docker engines.
@@ -190,7 +190,7 @@ services:
       DROPBOX_REFRESH_TOKEN: REFRESH_KEY # replace
       DROPBOX_APP_KEY: APP_KEY # replace
       DROPBOX_APP_SECRET: APP_SECRET # replace
-      DROPBOX_REMOTE_PATH: /Apps/my-test-app/some_subdir # replace
+      DROPBOX_REMOTE_PATH: /somedir # replace
     volumes:
       - data:/backup/my-app-backup:ro
       - /var/run/docker.sock:/var/run/docker.sock:ro
@@ -289,7 +289,7 @@ volumes:
   data:
 ```

-## Encrypting your backups using GPG
+## Encrypting your backups symmetrically using GPG

 ```yml
 version: '3'
@@ -311,7 +311,34 @@ volumes:
   data:
 ```

-## Using mysqldump to prepare the backup
+## Encrypting your backups asymmetrically using GPG
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:v2
+    environment:
+      AWS_S3_BUCKET_NAME: backup-bucket
+      AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
+      AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+      GPG_PUBLIC_KEY_RING: |
+        -----BEGIN PGP PUBLIC KEY BLOCK-----
+
+        D/cIHu6GH/0ghlcUVSbgMg5RRI5QKNNKh04uLAPxr75mKwUg0xPUaWgyyrAChVBi
+        ...
+        -----END PGP PUBLIC KEY BLOCK-----
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+  data:
+```
+
+## Using mariadb-dump/mysqldump to prepare the backup

 ```yml
 version: '3'
@@ -320,7 +347,7 @@ services:
   database:
     image: mariadb:latest
     labels:
-      - docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump -psecret --all-databases > /tmp/dumps/dump.sql'
+      - docker-volume-backup.archive-pre=/bin/sh -c 'mariadb-dump -psecret --all-databases > /tmp/dumps/dump.sql'
     volumes:
       - data:/tmp/dumps
   backup:
@@ -331,7 +358,7 @@ services:
     volumes:
       - ./local:/archive
      - data:/backup/data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

 volumes:
   data:
@@ -9,7 +9,7 @@ nav_order: 2
 Backup targets, schedule and retention are configured using environment variables.

 {: .note }
-You can use any environment variable from below also with a `_FILE` suffix to be able to load the value from a file.
+As per established convention, you can use any environment variable key from below with a `_FILE` suffix in order to load the value from a file instead.
 This is typically useful when using [Docker Secrets](https://docs.docker.com/engine/swarm/secrets/) or similar.
 Note that secrets will not be trimmed of leading or trailing whitespace.
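The `_FILE` convention described above can be implemented with a small lookup helper. The following is a hedged sketch under assumed names — `lookupWithFileFallback` is hypothetical, not this repository's code:

```go
package main

import (
	"fmt"
	"os"
)

// lookupWithFileFallback returns the value of key, or, when key_FILE is
// set instead, the contents of the referenced file. It mirrors the
// documented behavior: file contents are not trimmed of whitespace.
func lookupWithFileFallback(key string) (string, error) {
	if v, ok := os.LookupEnv(key); ok {
		return v, nil
	}
	if path, ok := os.LookupEnv(key + "_FILE"); ok {
		b, err := os.ReadFile(path)
		if err != nil {
			return "", fmt.Errorf("reading %s_FILE: %w", key, err)
		}
		return string(b), nil
	}
	return "", fmt.Errorf("neither %s nor %s_FILE is set", key, key)
}

func main() {
	// e.g. AWS_SECRET_ACCESS_KEY_FILE=/run/secrets/aws_secret
	v, err := lookupWithFileFallback("AWS_SECRET_ACCESS_KEY")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(len(v), "bytes loaded")
}
```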
@@ -17,13 +17,14 @@ Note that secrets will not be trimmed of leading or trailing whitespace.
 In case you encounter double quoted values in your runtime configuration you might still be using an [older version of `docker-compose`][compose-issue].
 You can work around this by either updating `docker-compose` or unquoting your configuration values.

-You can populate below template according to your requirements and use it as your `env_file`:
+You can populate below template according to your requirements and use it as your `env_file`.
+The values for each key currently match its default.

 {% raw %}
 ```
 ########### BACKUP SCHEDULE

 # Backups can be run on fixed scheduled that are defined as a cron expression.
 # A cron expression represents a set of times, using 5 or 6 space-separated fields.
 #
 # Field name | Mandatory? | Allowed values | Allowed special characters
@@ -37,35 +38,45 @@ You can populate below template according to your requirements and use it as you
 #
 # Month and Day-of-week field values are case insensitive.
 # "SUN", "Sun", and "sun" are equally accepted.
-# If no value is set, `@daily` will be used.
+# If you do not want the cron to ever run, use `0 0 5 31 2 ?`.
+# Refer to sites like <https://crontab.guru> for help.
+# If no value is set, `@daily` will be used, which runs every
+# day at midnight.

-# BACKUP_CRON_EXPRESSION="0 2 * * *"
+# BACKUP_CRON_EXPRESSION="@daily"

+# ---
+
 # The compression algorithm used in conjunction with tar.
-# Valid options are: "gz" (Gzip) and "zst" (Zstd).
-# Note that the selection affects the file extension.
+# Valid options are: "gz" (Gzip), "zst" (Zstd) or "none" (tar only).
+# Default is "gz". Note that the selection affects the file extension.

 # BACKUP_COMPRESSION="gz"

+# ---
+
 # Parallelism level for "gz" (Gzip) compression.
 # Defines how many blocks of data are concurrently processed.
 # Higher values result in faster compression. No effect on decompression
 # Default = 1. Setting this to 0 will use all available threads.

-# GZIP_PARALLELISM=1
+# GZIP_PARALLELISM="1"

-# The name of the backup file including the extension.
-# Format verbs will be replaced as in `strftime`. Omitting them
+# ---
+
+# The desired name of the backup file including the extension.
+# Format verbs will be replaced as in `strftime`. Omitting all verbs
 # will result in the same filename for every backup run, which means previous
 # versions will be overwritten on subsequent runs.
 # Extension can be defined literally or via "{{ .Extension }}" template,
-# in which case it will become either "tar.gz" or "tar.zst" (depending
+# in which case it will become either "tar.gz", "tar.zst" or ".tar" (depending
 # on your BACKUP_COMPRESSION setting).
 # The default results in filenames like: `backup-2021-08-29T04-00-00.tar.gz`.

 # BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.{{ .Extension }}"

+# ---
+
 # Setting BACKUP_FILENAME_EXPAND to true allows for environment variable
 # placeholders in BACKUP_FILENAME, BACKUP_LATEST_SYMLINK and in
 # BACKUP_PRUNING_PREFIX that will get expanded at runtime,
@@ -76,10 +87,15 @@ You can populate below template according to your requirements and use it as you

 # BACKUP_FILENAME_EXPAND="true"

+# ---
+
 # When storing local backups, a symlink to the latest backup can be created
 # in case a value is given for this key. This has no effect on remote backups.
+# Example: "backup.latest.tar.gz"

-# BACKUP_LATEST_SYMLINK="backup.latest.tar.gz"
+# BACKUP_LATEST_SYMLINK=""

+# ---
+
 # ************************************************************************
 # The BACKUP_FROM_SNAPSHOT option has been deprecated and will be removed
@@ -93,191 +109,285 @@ You can populate below template according to your requirements and use it as you
|
||||
|
||||
# BACKUP_FROM_SNAPSHOT="false"
|
||||
|
||||
# By default, the `/backup` directory inside the container will be backed up.
|
||||
# In case you need to use a custom location, set `BACKUP_SOURCES`.
|
||||
# ---
|
||||
|
||||
# BACKUP_SOURCES="/other/location"
|
||||
# By default, the contents of the `/backup` directory inside the container
|
||||
# will be backed up. In case you need to use a custom location, set `BACKUP_SOURCES`.
|
||||
# Example: "/other/location"
|
||||
|
||||
# When given, all files in BACKUP_SOURCES whose full path matches the given
|
||||
# BACKUP_SOURCES="/backup"
|
||||
|
||||
# ---
|
||||
|
||||
# When a value is given, all files in BACKUP_SOURCES whose full path matches the
|
||||
# regular expression will be excluded from the archive. Regular Expressions
|
||||
# can be used as from the Go standard library https://pkg.go.dev/regexp
|
||||
# Example: "\.log$"
|
||||
|
||||
# BACKUP_EXCLUDE_REGEXP="\.log$"
|
||||
# BACKUP_EXCLUDE_REGEXP=""
|
||||
|
||||
# ---
|
||||
|
||||
# Exclude one or many storage backends from the pruning process.
|
||||
# Available backends are: S3, WebDAV, SSH, Local, Dropbox, Azure
|
||||
# E.g. with one backend excluded: BACKUP_SKIP_BACKENDS_FROM_PRUNE=s3
|
||||
# E.g. with multiple backends excluded: BACKUP_SKIP_BACKENDS_FROM_PRUNE=s3,webdav
|
||||
# Available backends are: S3, WebDAV, SSH, Local, Dropbox, Azure
|
||||
# Note: The name of the backends is case insensitive.
|
||||
# Note: The names of the backends are case insensitive.
|
||||
# Default: All backends get pruned.
|
||||
|
||||
# BACKUP_SKIP_BACKENDS_FROM_PRUNE=
|
||||
# BACKUP_SKIP_BACKENDS_FROM_PRUNE=""
|
||||
|
||||
########### BACKUP STORAGE
|
||||
########### S3 COMPATIBLE STORAGE
|
||||
|
||||
# The name of the remote bucket that should be used for storing backups. If
|
||||
# this is not set, no remote backups will be stored.
|
||||
# Example: "backup-bucket"
|
||||
|
||||
# AWS_S3_BUCKET_NAME="backup-bucket"
|
||||
# AWS_S3_BUCKET_NAME=""
|
||||
|
||||
# ---
|
||||
|
||||
# If you want to store the backup in a non-root location on your bucket
|
||||
# you can provide a path. The path must not contain a leading slash.
|
||||
# Example: "my/backup/location"
|
||||
|
||||
# AWS_S3_PATH="my/backup/location"
|
||||
# AWS_S3_PATH=""
|
||||
|
||||
# ---
|
||||
|
||||
# Define credentials for authenticating against the backup storage and a bucket
|
||||
# name. Although all of these keys are `AWS`-prefixed, the setup can be used
|
||||
# with any S3 compatible storage.
|
||||
|
||||
# AWS_ACCESS_KEY_ID="<xxx>"
|
||||
# AWS_SECRET_ACCESS_KEY="<xxx>"
|
||||
# AWS_ACCESS_KEY_ID=""
|
||||
# AWS_SECRET_ACCESS_KEY=""
|
||||
|
||||
# ---
|
||||
|
||||
# Instead of providing static credentials, you can also use IAM instance profiles
|
||||
# or similar to provide authentication. Some possible configuration options on AWS:
|
||||
# - EC2: http://169.254.169.254
|
||||
# - ECS: http://169.254.170.2
|
||||
|
||||
# AWS_IAM_ROLE_ENDPOINT="http://169.254.169.254"
|
||||
# AWS_IAM_ROLE_ENDPOINT=""
|
||||
|
||||
# ---
|
||||
|
||||
# This is the FQDN of your storage server, e.g. `storage.example.com`.
|
||||
# Do not set this when working against AWS S3 (the default value is
|
||||
# `s3.amazonaws.com`). If you need to set a specific (non-https) protocol, you
|
||||
# will need to use the option below.
|
||||
# If you need to set a specific (non-https) protocol, you will need to use the option below.
|
||||
# The default value points to the standard AWS S3 endpoint.
|
||||
|
||||
# AWS_ENDPOINT="storage.example.com"
|
||||
# AWS_ENDPOINT="s3.amazonaws.com"
|
||||
|
||||
# The protocol to be used when communicating with your storage server.
|
||||
# ---
|
||||
|
||||
# The protocol to be used when communicating with your S3 storage server.
|
||||
# Defaults to "https". You can set this to "http" when communicating with
|
||||
# a different Docker container on the same host for example.
|
||||
# a different Docker container in the same virtual network for example.
|
||||
|
||||
# AWS_ENDPOINT_PROTO="https"
|
||||
|
||||
# ---
|
||||
|
||||
# Setting this variable to `true` will disable verification of
|
||||
# SSL certificates for AWS_ENDPOINT. You shouldn't use this unless you use
|
||||
# self-signed certificates for your remote storage backend. This can only be
|
||||
# used when AWS_ENDPOINT_PROTO is set to `https`.
|
||||
|
||||
# AWS_ENDPOINT_INSECURE="true"
|
||||
# AWS_ENDPOINT_INSECURE="false"
|
||||
|
||||
# ---
|
||||
|
||||
# If you wish to use self signed certificates your S3 server, you can pass
|
||||
# the location of a PEM encoded CA certificate and it will be used for
|
||||
# validating your certificates.
|
||||
# Alternatively, pass a PEM encoded string containing the certificate.
|
||||
# validating your certificates. Alternatively, pass a PEM encoded string
|
||||
# containing the certificate.
|
||||
# Example: "/path/to/cert.pem"
|
||||
|
||||
# AWS_ENDPOINT_CA_CERT="/path/to/cert.pem"
|
||||
# AWS_ENDPOINT_CA_CERT=""
|
||||
|
||||
# Setting this variable will change the S3 storage class header.
|
||||
# Defaults to "STANDARD", you can set this value according to your needs.
|
||||
# ---
|
||||
|
||||
# AWS_STORAGE_CLASS="GLACIER"
|
||||
# Setting a value for this key will change the S3 storage class header.
|
||||
# Default behavior is to use the standard class when no value is given.
|
||||
# Example: "GLACIER"
|
||||
|
||||
# AWS_STORAGE_CLASS=""
|
||||
|
||||
# ---
|
||||
|
||||
# Setting this variable will change the S3 default part size for the copy step.
|
||||
# This value is useful when you want to upload large files.
|
||||
# NB : While using Scaleway as S3 provider, be aware that the parts counter is set to 1.000.
|
||||
# NB: While using Scaleway as S3 provider, be aware that the parts counter is set to 1.000.
|
||||
# While Minio uses a hard coded value to 10.000. As a workaround, try to set a higher value.
|
||||
# Defaults to "16" (MB) if unset (from minio), you can set this value according to your needs.
|
||||
# The unit is in MB and an integer.
|
||||
|
||||
# AWS_PART_SIZE=16
|
||||
# AWS_PART_SIZE="16"
|
||||
|
||||
# You can also backup files to any WebDAV server:
|
||||
########### WEBDAV STORAGE
|
||||
|
||||
# The URL of the remote WebDAV server
|
||||
# Example: "https://webdav.example.com"
|
||||
|
||||
# WEBDAV_URL="https://webdav.example.com"
|
||||
# WEBDAV_URL=""
|
||||
|
||||
# ---
|
||||
|
||||
# The Directory to place the backups to on the WebDAV server.
|
||||
# If the path is not present on the server it will be created.
|
||||
# Example: "/my/directory/"
|
||||
|
||||
# WEBDAV_PATH="/my/directory/"
|
||||
# WEBDAV_PATH=""
|
||||
|
||||
# ---
|
||||
|
||||
# The username for the WebDAV server
|
||||
# Example: "user"
|
||||
|
||||
# WEBDAV_USERNAME="user"
|
||||
# WEBDAV_USERNAME=""
|
||||
|
||||
# ---
|
||||
|
||||
# The password for the WebDAV server
|
||||
# Example: "password"
|
||||
|
||||
# WEBDAV_PASSWORD="password"
|
||||
# WEBDAV_PASSWORD=""
|
||||
|
||||
# Setting this variable to `true` will disable verification of
|
||||
# ---
|
||||
|
||||
# Setting this variable to "true" will disable verification of
|
||||
# SSL certificates for WEBDAV_URL. You shouldn't use this unless you use
|
||||
# self-signed certificates for your remote storage backend.
|
||||
|
||||
# WEBDAV_URL_INSECURE="true"
|
||||
# WEBDAV_URL_INSECURE="false"
|
||||
|
||||
-# You can also backup files to any SSH server:
+########### SSH/SFTP STORAGE

-# The URL of the remote SSH server
+# The FQDN of the remote SSH server
+# Example: "server.local"

-# SSH_HOST_NAME="server.local"
+# SSH_HOST_NAME=""

+# ---

# The port of the remote SSH server
# Optional variable default value is `22`

-# SSH_PORT=2222
+# SSH_PORT="22"

+# ---

# The Directory to place the backups to on the SSH server.
+# Example: "/home/user/backups"

-# SSH_REMOTE_PATH="/my/directory/"
+# SSH_REMOTE_PATH=""

+# ---

# The username for the SSH server
+# Example: "user"

-# SSH_USER="user"
+# SSH_USER=""

+# ---

# The password for the SSH server
+# Example: "password"

-# SSH_PASSWORD="password"
+# SSH_PASSWORD=""

-# The private key path in container for SSH server
-# Default value: /root/.ssh/id_rsa
-# If file is mounted to /root/.ssh/id_rsa path it will be used. Non-RSA keys will
-# also work.
+# ---

+# The private key path in container for SSH server.
+# Consumers can mount a file into /root/.ssh/id_rsa (or the respective value)
+# in order to have it being used. Non-RSA keys (e.g. ed25519) will also work.

# SSH_IDENTITY_FILE="/root/.ssh/id_rsa"

-# The passphrase for the identity file
+# ---

-# SSH_IDENTITY_PASSPHRASE="pass"
+# The passphrase for the identity file if applicable
+# Example: "pass"

+# SSH_IDENTITY_PASSPHRASE=""

########### AZURE BLOB STORAGE

# The credential's account name when using Azure Blob Storage. This has to be
# set when using Azure Blob Storage.
+# Example: "account-name"

-# AZURE_STORAGE_ACCOUNT_NAME="account-name"
+# AZURE_STORAGE_ACCOUNT_NAME=""

+# ---

# The credential's primary account key when using Azure Blob Storage. If this
-# is not given, the command tries to fall back to using a managed identity.
+# is not given, the command tries to fall back to using a connection string
+# (if given) or a managed identity (if neither is set).

-# AZURE_STORAGE_PRIMARY_ACCOUNT_KEY="<xxx>"
+# AZURE_STORAGE_PRIMARY_ACCOUNT_KEY=""

+# ---

+# A connection string for accessing Azure Blob Storage. If this
+# is not given, the command tries to fall back to using a primary account key
+# (if given) or a managed identity (if neither is set).

+# AZURE_STORAGE_CONNECTION_STRING=""

+# ---

# The container name when using Azure Blob Storage.
+# Example: "container-name"

-# AZURE_STORAGE_CONTAINER_NAME="container-name"
+# AZURE_STORAGE_CONTAINER_NAME=""

# ---

# The service endpoint when using Azure Blob Storage. This is a template that
# can be passed the account name as shown in the default value below.

# AZURE_STORAGE_ENDPOINT="https://{{ .AccountName }}.blob.core.windows.net/"

+# ---

+# The access tier when using Azure Blob Storage. Possible values are
+# https://github.com/Azure/azure-sdk-for-go/blob/sdk/storage/azblob/v1.3.2/sdk/storage/azblob/internal/generated/zz_constants.go#L14-L30
+# Example: "Cold"

+# AZURE_STORAGE_ACCESS_TIER=""

########### DROPBOX STORAGE

# Absolute remote path in your Dropbox where the backups shall be stored.
# Note: Use your app's subpath in Dropbox, if it doesn't have global access.
-# Consulte the README for further information.
+# Consult the README for further information.
+# Example: "/my/directory"

-# DROPBOX_REMOTE_PATH="/my/directory"
+# DROPBOX_REMOTE_PATH=""

-# Number of concurrent chunked uploads for Dropbox.
-# Values above 6 usually result in no enhancements.
-
-# DROPBOX_CONCURRENCY_LEVEL="6"
+# ---

# App key and app secret from your app created at https://www.dropbox.com/developers/apps/info

# DROPBOX_APP_KEY=""
# DROPBOX_APP_SECRET=""

# ---

+# Number of concurrent chunked uploads for Dropbox.
+# Values above 6 usually result in no enhancements.

+# DROPBOX_CONCURRENCY_LEVEL="6"

+# ---

# Refresh token to request new short-lived tokens (OAuth2). Consult README to see how to get one.

# DROPBOX_REFRESH_TOKEN=""

########### LOCAL FILE STORAGE

# In addition to storing backups remotely, you can also keep local copies.
# Pass a container-local path to store your backups if needed. You also need to
# mount a local folder or Docker volume into that location (`/archive`
@@ -299,10 +409,12 @@ You can populate below template according to your requirements and use it as you
# for such files, or to configure BACKUP_PRUNING_PREFIX to limit
# removal to certain files.

-# Define this value to enable automatic rotation of old backups. The value
-# declares the number of days for which a backup is kept.
+# Pass zero or a positive integer value to enable automatic rotation of
+# old backups. The value declares the number of days for which a backup is kept.

-# BACKUP_RETENTION_DAYS="7"
+# BACKUP_RETENTION_DAYS="-1"

+# ---

# In case the duration a backup takes fluctuates noticeably in your setup
# you can adjust this setting to make sure there are no race conditions
@@ -314,6 +426,8 @@ You can populate below template according to your requirements and use it as you

# BACKUP_PRUNING_LEEWAY="1m"

+# ---

# In case your target bucket or directory contains other files than the ones
# managed by this container, you can limit the scope of rotation by setting
# a prefix value. This would usually be the non-parametrized part of your
@@ -321,13 +435,37 @@ You can populate below template according to your requirements and use it as you
# you can set BACKUP_PRUNING_PREFIX to `db-backup-` and make sure
# unrelated files are not affected by the rotation mechanism.

-# BACKUP_PRUNING_PREFIX="backup-"
+# BACKUP_PRUNING_PREFIX=""

########### BACKUP ENCRYPTION

-# Backups can be encrypted using gpg in case a passphrase is given.
+# All of the encryption options are mutually exclusive. Provide a single option
+# for the encryption scheme of your choice.

-# GPG_PASSPHRASE="<xxx>"
+# Backups can be encrypted symmetrically using gpg in case a passphrase is given.

+# GPG_PASSPHRASE=""

+# ---

+# Backups can be encrypted asymmetrically using gpg in case publickeys are given.
+# You can use pipe syntax to pass a multiline value.

+# GPG_PUBLIC_KEY_RING=""

+# ---

+# Backups can be encrypted symmetrically using age in case a passphrase is given.

+# AGE_PASSPHRASE=""

+# ---

+# Backups can be encrypted asymmetrically using age in case publickeys are given.
+# Multiple keys need to be provided as a comma separated list. Right now, this
+# supports `age` and `ssh` keys

+# AGE_PUBLIC_KEYS=""

########### STOPPING CONTAINERS AND SERVICES DURING BACKUP

@@ -335,18 +473,17 @@ You can populate below template according to your requirements and use it as you
# `docker-volume-backup.stop-during-backup` label. By default, all containers and
# services that are labeled with `true` will be stopped. If you need more fine
# grained control (e.g. when running multiple containers based on this image),
-# you can override this default by specifying a different value here.
-# BACKUP_STOP_DURING_BACKUP_LABEL="service1"
+# you can override this default by specifying a different string value here.
+# BACKUP_STOP_DURING_BACKUP_LABEL="true"

# When trying to scale down Docker Swarm services, give up after
# the specified amount of time in case the service has not converged yet.
# In case you need to adjust this timeout, supply a duration
# value as per https://pkg.go.dev/time#ParseDuration to `BACKUP_STOP_SERVICE_TIMEOUT`.
# Defaults to 5 minutes.

# BACKUP_STOP_SERVICE_TIMEOUT="5m"

-########### EXECUTING COMMANDS IN CONTAINERS PRE/POST BACKUP
+########### EXECUTING COMMANDS IN CONTAINERS DURING THE BACKUP LIFECYCLE

# It is possible to define commands to be run in any container before and after
# a backup is conducted. The commands themselves are defined in labels like
@@ -357,29 +494,34 @@ You can populate below template according to your requirements and use it as you
# is configured to be "true", command execution output will be forwarded to
# the backup container's stdout and stderr.

-# EXEC_FORWARD_OUTPUT="true"
+# EXEC_FORWARD_OUTPUT="false"

+# ---

# Without any further configuration, all commands defined in labels will be
# run before and after a backup. If you need more fine grained control, you
# can use this option to set a label that will be used for narrowing down
-# the set of eligible containers. When set, an eligible container will also need
-# to be labeled as `docker-volume-backup.exec-label=database`.
+# the set of eligible containers. E.g. when setting this to `database`,
+# an eligible container will also need to be labeled as `docker-volume-backup.exec-label=database`.

-# EXEC_LABEL="database"
+# EXEC_LABEL=""

########### NOTIFICATIONS

# Notifications (email, Slack, etc.) can be sent out when a backup run finishes.
# Configuration is provided as a comma-separated list of URLs as consumed
-# by `shoutrrr`: https://containrrr.dev/shoutrrr/0.7/services/overview/
+# by `shoutrrr`: https://containrrr.dev/shoutrrr/v0.8/services/overview/
# The content of such notifications can be customized. Dedicated documentation
# on how to do this can be found in the README. When providing multiple URLs or
# an URL that contains a comma, the values can be URL encoded to avoid ambiguities.

-# The below URL demonstrates how to send an email using the provided SMTP
+# The following example URL demonstrates how to send an email using the provided SMTP
# configuration and credentials.
+# Example: "smtp://username:password@host:587/?fromAddress=sender@example.com&toAddresses=recipient@example.com"

-# NOTIFICATION_URLS=smtp://username:password@host:587/?fromAddress=sender@example.com&toAddresses=recipient@example.com
+# NOTIFICATION_URLS=""

+# ---

# By default, notifications would only be sent out when a backup run fails
# To receive notifications for every run, set `NOTIFICATION_LEVEL` to `info`
@@ -391,8 +533,9 @@ You can populate below template according to your requirements and use it as you

# If you are interfacing with Docker via TCP you can set the Docker host here
# instead of mounting the Docker socket as a volume. This is unset by default.
+# Example: "tcp://docker_socket_proxy:2375"

-# DOCKER_HOST="tcp://docker_socket_proxy:2375"
+# DOCKER_HOST=""

########### LOCK_TIMEOUT

@@ -419,20 +562,25 @@ You can populate below template according to your requirements and use it as you
# The recipient(s) of the notification. Supply a comma separated list
# of addresses if you want to notify multiple recipients. If this is
# not set, no emails will be sent.
+# Example: "you@example.com"

-# EMAIL_NOTIFICATION_RECIPIENT="you@example.com"
+# EMAIL_NOTIFICATION_RECIPIENT=""

-# The "From" header of the sent email. Defaults to `noreply@nohost`.
+# ---

-# EMAIL_NOTIFICATION_SENDER="no-reply@example.com"
+# The "From" header of the sent email.
+# Example: "no-reply@example.com"

+# EMAIL_NOTIFICATION_SENDER="noreply@nohost"

+# ---

# Configuration and credentials for the SMTP server to be used.
# EMAIL_SMTP_PORT defaults to 587.

-# EMAIL_SMTP_HOST="posteo.de"
-# EMAIL_SMTP_PASSWORD="<xxx>"
-# EMAIL_SMTP_USERNAME="no-reply@example.com"
-# EMAIL_SMTP_PORT="<port>"
+# EMAIL_SMTP_HOST=""
+# EMAIL_SMTP_PASSWORD=""
+# EMAIL_SMTP_USERNAME=""
+# EMAIL_SMTP_PORT="587"
```
{% endraw %}
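
For a quick smoke test of a template filled in along these lines, a one-off run in the style of the repository's test scripts can be used. The following is a sketch only: the image tag, env file name and volume names are placeholders, not values prescribed by the template above.

```sh
#!/bin/sh
# Sketch: trigger a single ad-hoc backup using a populated template that
# has been saved as backup.env. One data volume and a local archive
# directory are mounted, plus a read-only Docker socket.
docker run --rm \
  --env-file backup.env \
  -v app_data:/backup/app_data:ro \
  -v "$PWD/archive":/archive \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  --entrypoint backup \
  offen/docker-volume-backup:latest
```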
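As a worked example for the part size setting: with multipart uploads, the largest possible archive is roughly the part size multiplied by the provider's part counter, which is what the Scaleway and MinIO notes in the template are getting at. A small sketch using the template's own numbers:

```sh
#!/bin/sh
# Sketch: estimate the largest archive a given AWS_PART_SIZE supports.
PART_SIZE_MB="${AWS_PART_SIZE:-16}"
SCALEWAY_PARTS=1000 # parts counter per the template note
MINIO_PARTS=10000   # hard-coded limit per the template note
echo "Scaleway cap: $((PART_SIZE_MB * SCALEWAY_PARTS)) MB"
echo "MinIO cap:    $((PART_SIZE_MB * MINIO_PARTS)) MB"
```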
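Restoring encrypted archives mirrors what the test scripts further down in this diff do. For reference, a sketch with placeholder file names (the two decryption commands are alternatives, depending on which encryption option was set):

```sh
#!/bin/sh
# Sketch: decrypt and unpack archives produced with the settings above.
mkdir -p ./restore
# symmetric gpg (GPG_PASSPHRASE):
gpg -d backup.tar.gz.gpg > backup.tar.gz
# asymmetric age (AGE_PUBLIC_KEYS), using the matching identity file:
age --decrypt -i key.txt -o backup.tar.gz backup.tar.gz.age
tar -xf backup.tar.gz -C ./restore
```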
go.mod | 90
@@ -1,78 +1,88 @@
module github.com/offen/docker-volume-backup

-go 1.22
+go 1.24

require (
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
-	github.com/containrrr/shoutrrr v0.7.1
+	filippo.io/age v1.2.1
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
+	github.com/containrrr/shoutrrr v0.8.0
	github.com/cosiner/argv v0.1.0
-	github.com/docker/cli v24.0.9+incompatible
-	github.com/docker/docker v24.0.7+incompatible
-	github.com/gofrs/flock v0.8.1
+	github.com/docker/cli v28.0.0+incompatible
+	github.com/docker/docker v27.1.1+incompatible
+	github.com/gofrs/flock v0.12.1
	github.com/joho/godotenv v1.5.1
-	github.com/klauspost/compress v1.17.7
+	github.com/klauspost/compress v1.18.0
	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
-	github.com/minio/minio-go/v7 v7.0.67
+	github.com/minio/minio-go/v7 v7.0.87
	github.com/offen/envconfig v1.5.0
-	github.com/otiai10/copy v1.14.0
-	github.com/pkg/sftp v1.13.6
+	github.com/otiai10/copy v1.14.1
+	github.com/pkg/sftp v1.13.7
	github.com/robfig/cron/v3 v3.0.1
-	github.com/studio-b12/gowebdav v0.9.0
-	golang.org/x/crypto v0.19.0
-	golang.org/x/oauth2 v0.17.0
-	golang.org/x/sync v0.6.0
-	mvdan.cc/sh/v3 v3.8.0
+	github.com/studio-b12/gowebdav v0.10.0
+	golang.org/x/crypto v0.33.0
+	golang.org/x/oauth2 v0.27.0
+	golang.org/x/sync v0.11.0
+	mvdan.cc/sh/v3 v3.10.0
)

require (
	filippo.io/edwards25519 v1.1.0 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
	github.com/cloudflare/circl v1.3.7 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/containerd/log v0.1.0 // indirect
+	github.com/distribution/reference v0.6.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/go-ini/ini v1.67.0 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/goccy/go-json v0.10.5 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/minio/crc64nvme v1.0.1 // indirect
+	github.com/moby/docker-image-spec v1.3.1 // indirect
+	github.com/otiai10/mint v1.6.3 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect
+	go.opentelemetry.io/otel v1.26.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect
+	go.opentelemetry.io/otel/metric v1.26.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.26.0 // indirect
+	go.opentelemetry.io/otel/trace v1.26.0 // indirect
-	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.31.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
)

require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.3.3 // indirect
	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/ProtonMail/go-crypto v1.1.0-alpha.0
-	github.com/docker/distribution v2.8.2+incompatible // indirect
+	github.com/ProtonMail/go-crypto v1.1.0-alpha.1
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-units v0.4.0 // indirect
	github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
	github.com/dustin/go-humanize v1.0.1 // indirect
-	github.com/fatih/color v1.13.0 // indirect
+	github.com/fatih/color v1.17.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/google/uuid v1.5.0 // indirect
-	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.9 // indirect
	github.com/klauspost/pgzip v1.2.6
	github.com/kr/fs v0.1.0 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.16 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/sha256-simd v1.0.1 // indirect
	github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/morikuni/aec v1.0.0 // indirect
-	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
	github.com/pkg/errors v0.9.1 // indirect
-	github.com/rs/xid v1.5.0 // indirect
+	github.com/rs/xid v1.6.0 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
-	golang.org/x/net v0.21.0 // indirect
-	golang.org/x/sys v0.17.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
-	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
-	gopkg.in/ini.v1 v1.67.0 // indirect
+	golang.org/x/net v0.35.0 // indirect
+	golang.org/x/sys v0.30.0 // indirect
+	golang.org/x/text v0.22.0 // indirect
	gotest.tools/v3 v3.0.3 // indirect
)

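A dependency bump of this size is usually followed by revalidating the module graph and a full rebuild; a minimal local check could look like the following sketch (standard Go tooling only, nothing project-specific):

```sh
#!/bin/sh
# Sketch: sanity-check the upgraded dependency set.
go mod tidy   # verify go.mod and go.sum are consistent
go vet ./...  # stricter vet in newer toolchains surfaces issues such as
              # the non-constant fmt.Errorf format string fixed below
go build ./...
```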
@@ -1,4 +1,4 @@
-// Copyright 2024 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package errwrap

@@ -21,7 +21,7 @@ func Wrap(err error, msg string) error {
	chunks := strings.Split(frame.Function, "/")
	withCaller := fmt.Sprintf("%s: %s", chunks[len(chunks)-1], msg)
	if err == nil {
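		// note (editor): the switch to errors.New below presumably appeases
		// vet's printf check, which flags fmt.Errorf being called with a
		// non-constant format string; the returned error is identical.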
-		return fmt.Errorf(withCaller)
+		return errors.New(withCaller)
	}
	return fmt.Errorf("%s: %w", withCaller, err)
}

@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package azure

@@ -9,6 +9,7 @@ import (
	"errors"
	"fmt"
	"os"
+	"path"
	"path/filepath"
	"strings"
	"sync"
@@ -17,6 +18,8 @@ import (

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/offen/docker-volume-backup/internal/errwrap"
	"github.com/offen/docker-volume-backup/internal/storage"
@@ -25,6 +28,7 @@
type azureBlobStorage struct {
	*storage.StorageBackend
	client              *azblob.Client
+	uploadStreamOptions *blockblob.UploadStreamOptions
	containerName       string
}

@@ -33,12 +37,18 @@ type Config struct {
	AccountName       string
	ContainerName     string
	PrimaryAccountKey string
+	ConnectionString  string
	Endpoint          string
	RemotePath        string
+	AccessTier        string
}

// NewStorageBackend creates and initializes a new Azure Blob Storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
+	if opts.PrimaryAccountKey != "" && opts.ConnectionString != "" {
+		return nil, errwrap.Wrap(nil, "using primary account key and connection string are mutually exclusive")
+	}
+
	endpointTemplate, err := template.New("endpoint").Parse(opts.Endpoint)
	if err != nil {
		return nil, errwrap.Wrap(err, "error parsing endpoint template")
@@ -58,7 +68,12 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error

		client, err = azblob.NewClientWithSharedKeyCredential(normalizedEndpoint, cred, nil)
		if err != nil {
-			return nil, errwrap.Wrap(err, "error creating Azure client")
+			return nil, errwrap.Wrap(err, "error creating azure client from primary account key")
		}
+	} else if opts.ConnectionString != "" {
+		client, err = azblob.NewClientFromConnectionString(opts.ConnectionString, nil)
+		if err != nil {
+			return nil, errwrap.Wrap(err, "error creating azure client from connection string")
+		}
	} else {
		cred, err := azidentity.NewManagedIdentityCredential(nil)
@@ -67,12 +82,29 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
		}
		client, err = azblob.NewClient(normalizedEndpoint, cred, nil)
		if err != nil {
-			return nil, errwrap.Wrap(err, "error creating Azure client")
+			return nil, errwrap.Wrap(err, "error creating azure client from managed identity")
		}
	}

+	var uploadStreamOptions *blockblob.UploadStreamOptions
+	if opts.AccessTier != "" {
+		var found bool
+		for _, t := range blob.PossibleAccessTierValues() {
+			if string(t) == opts.AccessTier {
+				found = true
+				uploadStreamOptions = &blockblob.UploadStreamOptions{
+					AccessTier: &t,
+				}
+			}
+		}
+		if !found {
+			return nil, errwrap.Wrap(nil, fmt.Sprintf("%s is not a possible access tier value", opts.AccessTier))
+		}
+	}
+
	storage := azureBlobStorage{
		client:              client,
+		uploadStreamOptions: uploadStreamOptions,
		containerName:       opts.ContainerName,
		StorageBackend: &storage.StorageBackend{
			DestinationPath: opts.RemotePath,
@@ -93,12 +125,13 @@ func (b *azureBlobStorage) Copy(file string) error {
	if err != nil {
		return errwrap.Wrap(err, fmt.Sprintf("error opening file %s", file))
	}

	_, err = b.client.UploadStream(
		context.Background(),
		b.containerName,
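		// note (editor): path.Join always yields forward slashes, which is what
		// a remote object key needs; filepath.Join would use backslashes when
		// compiled for Windows. The same swap recurs in the backends below.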
-		filepath.Join(b.DestinationPath, filepath.Base(file)),
+		path.Join(b.DestinationPath, filepath.Base(file)),
		fileReader,
-		nil,
+		b.uploadStreamOptions,
	)
	if err != nil {
		return errwrap.Wrap(err, fmt.Sprintf("error uploading file %s", file))
@@ -109,7 +142,7 @@
// Prune rotates away backups according to the configuration and provided
// deadline for the Azure Blob storage backend.
func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
-	lookupPrefix := filepath.Join(b.DestinationPath, pruningPrefix)
+	lookupPrefix := path.Join(b.DestinationPath, pruningPrefix)
	pager := b.client.NewListBlobsFlatPager(b.containerName, &container.ListBlobsFlatOptions{
		Prefix: &lookupPrefix,
	})

@@ -7,7 +7,6 @@ import (
	"net/url"
	"os"
	"path"
-	"path/filepath"
	"strings"
	"sync"
	"time"
@@ -195,7 +194,7 @@ loop:
	_, err = b.client.UploadSessionFinish(
		files.NewUploadSessionFinishArg(
			files.NewUploadSessionCursor(sessionId, 0),
-			files.NewCommitInfo(filepath.Join(b.DestinationPath, name)),
+			files.NewCommitInfo(path.Join(b.DestinationPath, name)),
		), nil)
	if err != nil {
		return errwrap.Wrap(err, "error finishing the upload session")
@@ -247,7 +246,7 @@ func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*stora

	pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error {
		for _, match := range matches {
-			if _, err := b.client.DeleteV2(files.NewDeleteArg(filepath.Join(b.DestinationPath, match.Name))); err != nil {
+			if _, err := b.client.DeleteV2(files.NewDeleteArg(path.Join(b.DestinationPath, match.Name))); err != nil {
				return errwrap.Wrap(err, "error removing file from Dropbox storage")
			}
		}

@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package local

@@ -96,7 +96,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
			)
		}

-		if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
+		if !fi.IsDir() && fi.Mode()&os.ModeSymlink != os.ModeSymlink {
			candidates = append(candidates, candidate)
		}
	}

@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package s3

@@ -10,7 +10,6 @@ import (
	"fmt"
	"os"
	"path"
-	"path/filepath"
	"time"

	"github.com/minio/minio-go/v7"
@@ -124,7 +123,7 @@ func (b *s3Storage) Copy(file string) error {
		putObjectOptions.PartSize = uint64(partSize)
	}

-	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
+	if _, err := b.client.FPutObject(context.Background(), b.bucket, path.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
		if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
			return errwrap.Wrap(
				nil,
@@ -147,7 +146,7 @@ func (b *s3Storage) Copy(file string) error {
// Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend.
func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	candidates := b.client.ListObjects(context.Background(), b.bucket, minio.ListObjectsOptions{
-		Prefix:    filepath.Join(b.DestinationPath, pruningPrefix),
+		Prefix:    path.Join(b.DestinationPath, pruningPrefix),
		Recursive: true,
	})

@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package ssh

@@ -8,7 +8,6 @@ import (
	"io"
	"os"
	"path"
-	"path/filepath"
	"strings"
	"time"

@@ -115,7 +114,7 @@ func (b *sshStorage) Copy(file string) error {
	}
	defer source.Close()

-	destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
+	destination, err := b.sftpClient.Create(path.Join(b.DestinationPath, name))
	if err != nil {
		return errwrap.Wrap(err, "error creating file")
	}
@@ -164,24 +163,28 @@ func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.P
	}

	var matches []string
+	var numCandidates int
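	// note (editor): candidates are now counted explicitly so that directories
	// can be skipped and the prune statistics only reflect matching files.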
	for _, candidate := range candidates {
-		if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
+		if candidate.IsDir() || !strings.HasPrefix(candidate.Name(), pruningPrefix) {
			continue
		}

+		numCandidates++
		if candidate.ModTime().Before(deadline) {
			matches = append(matches, candidate.Name())
		}
	}

	stats := &storage.PruneStats{
-		Total:  uint(len(candidates)),
+		Total:  uint(numCandidates),
		Pruned: uint(len(matches)),
	}

-	pruneErr := b.DoPrune(b.Name(), len(matches), len(candidates), deadline, func() error {
+	pruneErr := b.DoPrune(b.Name(), len(matches), numCandidates, deadline, func() error {
		for _, match := range matches {
-			if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
-				return errwrap.Wrap(err, "error removing file")
+			p := path.Join(b.DestinationPath, match)
+			if err := b.sftpClient.Remove(p); err != nil {
+				return errwrap.Wrap(err, fmt.Sprintf("error removing file %s", p))
			}
		}
		return nil

@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package storage

@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package webdav

@@ -9,7 +9,6 @@ import (
	"net/http"
	"os"
	"path"
-	"path/filepath"
	"strings"
	"time"

@@ -77,7 +76,7 @@ func (b *webDavStorage) Copy(file string) error {
		return errwrap.Wrap(err, "error opening the file to be uploaded")
	}

-	if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
+	if err := b.client.WriteStream(path.Join(b.DestinationPath, name), r, 0644); err != nil {
		return errwrap.Wrap(err, "error uploading the file")
	}
	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to '%s' at path '%s'.", file, b.url, b.DestinationPath)
@@ -91,26 +90,27 @@ func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storag
	if err != nil {
		return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
	}

	var matches []fs.FileInfo
-	var lenCandidates int
+	var numCandidates int
	for _, candidate := range candidates {
-		if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
+		if candidate.IsDir() || !strings.HasPrefix(candidate.Name(), pruningPrefix) {
			continue
		}
-		lenCandidates++
+		numCandidates++
		if candidate.ModTime().Before(deadline) {
			matches = append(matches, candidate)
		}
	}

	stats := &storage.PruneStats{
-		Total:  uint(lenCandidates),
+		Total:  uint(numCandidates),
		Pruned: uint(len(matches)),
	}

-	pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error {
+	pruneErr := b.DoPrune(b.Name(), len(matches), numCandidates, deadline, func() error {
		for _, match := range matches {
-			if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
+			if err := b.client.Remove(path.Join(b.DestinationPath, match.Name())); err != nil {
				return errwrap.Wrap(err, "error removing file")
			}
		}

@@ -1,9 +1,12 @@
-FROM docker:24-dind
+FROM docker:27-dind

RUN apk add \
+	age \
	coreutils \
	curl \
+	expect \
	gpg \
	gpg-agent \
	jq \
	moreutils \
	tar \

test/age-passphrase/docker-compose.yml | 24 (new file)
@@ -0,0 +1,24 @@
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_FILENAME: test.tar.gz
      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.age
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      AGE_PASSPHRASE: "Dance.0Tonight.Go.Typical"
    volumes:
      - ${LOCAL_DIR:-./local}:/archive
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
test/age-passphrase/run.sh | 39 (new, executable)
@@ -0,0 +1,39 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename "$(pwd)")

export LOCAL_DIR="$(mktemp -d)"

docker compose up -d --quiet-pull
sleep 5

docker compose exec backup backup

expect_running_containers "2"

TMP_DIR=$(mktemp -d)

# complex usage of expect(1) due to age not having a way to programmatically
# provide the passphrase
expect -i <<EOL
spawn age --decrypt -o "$LOCAL_DIR/decrypted.tar.gz" "$LOCAL_DIR/test.tar.gz.age"
expect -exact "Enter passphrase: "
send -- "Dance.0Tonight.Go.Typical\r"
sleep 1
EOL
tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C "$TMP_DIR"

if [ ! -f "$TMP_DIR/backup/app_data/offen.db" ]; then
  fail "Could not find expected file in untared archive."
fi
rm -vf "$LOCAL_DIR/decrypted.tar.gz"

pass "Found relevant files in decrypted and untared local backup."

if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.age" ]; then
  fail "Could not find local symlink to latest encrypted backup."
fi
test/age-publickey/.gitignore | 1 (new file)
@@ -0,0 +1 @@
pk-*.txt
test/age-publickey/docker-compose.yml | 24 (new file)
@@ -0,0 +1,24 @@
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_FILENAME: test.tar.gz
      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.age
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      AGE_PUBLIC_KEYS: "${BACKUP_AGE_PUBLIC_KEYS}"
    volumes:
      - ${LOCAL_DIR:-./local}:/archive
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
test/age-publickey/run.sh | 47 (new, executable)
@@ -0,0 +1,47 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename "$(pwd)")

export LOCAL_DIR="$(mktemp -d)"

age-keygen >"$LOCAL_DIR/pk-a.txt"
PK_A="$(grep -E 'public key' <"$LOCAL_DIR/pk-a.txt" | cut -d: -f2 | xargs)"
age-keygen >"$LOCAL_DIR/pk-b.txt"
PK_B="$(grep -E 'public key' <"$LOCAL_DIR/pk-b.txt" | cut -d: -f2 | xargs)"

ssh-keygen -t ed25519 -m pem -f "$LOCAL_DIR/id_ed25519" -C "docker-volume-backup@local"
PK_C="$(cat $LOCAL_DIR/id_ed25519.pub)"

export BACKUP_AGE_PUBLIC_KEYS="$PK_A,$PK_B,$PK_C"

docker compose up -d --quiet-pull
sleep 5

docker compose exec backup backup

expect_running_containers "2"

do_decrypt() {
  TMP_DIR=$(mktemp -d)
  age --decrypt -i "$1" -o "$LOCAL_DIR/decrypted.tar.gz" "$LOCAL_DIR/test.tar.gz.age"
  tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C "$TMP_DIR"

  if [ ! -f "$TMP_DIR/backup/app_data/offen.db" ]; then
    fail "Could not find expected file in untared archive."
  fi
  rm -vf "$LOCAL_DIR/decrypted.tar.gz"

  pass "Found relevant files in decrypted and untared local backup."

  if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.age" ]; then
    fail "Could not find local symlink to latest encrypted backup."
  fi
}

do_decrypt "$LOCAL_DIR/pk-a.txt"
do_decrypt "$LOCAL_DIR/pk-b.txt"
do_decrypt "$LOCAL_DIR/id_ed25519"
@@ -1,8 +1,6 @@
-version: '3'
-
services:
  storage:
-    image: mcr.microsoft.com/azure-storage/azurite:3.26.0
+    image: mcr.microsoft.com/azure-storage/azurite:3.33.0
    volumes:
      - ${DATA_DIR:-./data}:/data
    command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data
@@ -36,6 +34,7 @@ services:
      AZURE_STORAGE_CONTAINER_NAME: test-container
      AZURE_STORAGE_ENDPOINT: http://storage:10000/{{ .AccountName }}/
      AZURE_STORAGE_PATH: 'path/to/backup'
+      AZURE_STORAGE_ACCESS_TIER: Hot
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
@@ -43,7 +42,7 @@ services:
      BACKUP_PRUNING_PREFIX: test
    volumes:
      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  minio:
    hostname: minio.local
@@ -32,7 +30,7 @@ services:
      BACKUP_PRUNING_LEEWAY: 5s
    volumes:
      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ${CERT_DIR:-.}/rootCA.crt:/root/minio-rootCA.crt

  offen:

@@ -37,7 +37,7 @@ docker run --rm -q \
  --network test_network \
  -v app_data:/backup/app_data \
  -v empty_data:/backup/empty_data \
-  -v /var/run/docker.sock:/var/run/docker.sock \
+  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  --env AWS_ACCESS_KEY_ID=test \
  --env AWS_SECRET_ACCESS_KEY=GMusLtUmILge2by+z890kQ \
  --env AWS_ENDPOINT=minio:9000 \

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

-version: '3.8'
-
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -11,7 +9,7 @@ services:
    volumes:
      - offen_data:/backup/offen_data:ro
      - ${LOCAL_DIR:-./local}:/archive
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest

@@ -1,5 +1,3 @@
-version: '3.8'
-
services:
  database:
    image: mariadb:10.7
@@ -44,7 +42,7 @@ services:
    volumes:
      - ${LOCAL_DIR:-./local}:/archive
      - app_data:/backup/data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

volumes:
  app_data:

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -14,7 +12,7 @@ services:
      - ./01backup.env:/etc/dockervolumebackup/conf.d/01backup.env
      - ./02backup.env:/etc/dockervolumebackup/conf.d/02backup.env
      - ./03never.env:/etc/dockervolumebackup/conf.d/03never.env
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  openapi_mock:
    image: muonsoft/openapi-mock:0.3.9
@@ -44,7 +42,7 @@ services:
      DROPBOX_CONCURRENCY_LEVEL: 6
    volumes:
      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -13,7 +11,7 @@ services:
    volumes:
      - ${LOCAL_DIR:-local}:/local
      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest

test/gpg-asym/docker-compose.yml | 25 (new file)
@@ -0,0 +1,25 @@
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_FILENAME: test.tar.gz
      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      GPG_PUBLIC_KEY_RING_FILE: /keys/public_key.asc
    volumes:
      - ${KEY_DIR:-.}/public_key.asc:/keys/public_key.asc
      - ${LOCAL_DIR:-./local}:/archive
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
test/gpg-asym/run.sh | 49 (new, executable)
@@ -0,0 +1,49 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

export LOCAL_DIR=$(mktemp -d)

export KEY_DIR=$(mktemp -d)

export PASSPHRASE="test"

gpg --batch --gen-key <<EOF
Key-Type: RSA
Key-Length: 4096
Name-Real: offen
Name-Email: docker-volume-backup@local
Expire-Date: 0
Passphrase: $PASSPHRASE
%commit
EOF

gpg --export --armor --batch --yes --pinentry-mode loopback --passphrase $PASSPHRASE --output $KEY_DIR/public_key.asc

docker compose up -d --quiet-pull
sleep 5

docker compose exec backup backup

expect_running_containers "2"

TMP_DIR=$(mktemp -d)

gpg -d --pinentry-mode loopback --yes --passphrase $PASSPHRASE "$LOCAL_DIR/test.tar.gz.gpg" > "$LOCAL_DIR/decrypted.tar.gz"

tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C $TMP_DIR

if [ ! -f $TMP_DIR/backup/app_data/offen.db ]; then
  fail "Could not find expected file in untared archive."
fi
rm "$LOCAL_DIR/decrypted.tar.gz"

pass "Found relevant files in decrypted and untared local backup."

if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.gpg" ]; then
  fail "Could not find local symlink to latest encrypted backup."
fi
@@ -1,5 +1,3 @@
-version: '3'
-
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -13,7 +11,7 @@ services:
    volumes:
      - ${LOCAL_DIR:-./local}:/archive
      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest

@@ -1,5 +1,3 @@
-version: '3.8'
-
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -15,7 +13,7 @@ services:
      BACKUP_PRUNING_PREFIX: test
    volumes:
      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ${LOCAL_DIR:-./local}:/archive

  offen:

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -9,7 +7,7 @@ services:
      BACKUP_RETENTION_DAYS: '7'
    volumes:
      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ${LOCAL_DIR:-./local}:/archive

  offen:

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  db:
    image: postgres:14-alpine

@@ -23,7 +23,7 @@ docker run --rm -q \
  --network test_network \
  -v app_data:/backup/app_data \
  -v $LOCAL_DIR:/archive \
-  -v /var/run/docker.sock:/var/run/docker.sock \
+  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  --env BACKUP_COMPRESSION=gz \
  --env GZIP_PARALLELISM=0 \
  --env BACKUP_FILENAME='test.{{ .Extension }}' \

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

-version: '3.8'
-
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -24,7 +22,7 @@ services:
      TASKS: ${ALLOW_TASKS:-1}
      NODES: ${ALLOW_NODES:-1}
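      # note (editor): assumption — TASKS/NODES here (and CONTAINERS/POST in the
      # next file) read like permission toggles of a Docker socket proxy image,
      # whitelisting which Docker API endpoints the backup container may call.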
    volumes:
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  pg:
    image: postgres:14-alpine

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

-version: '3.8'
-
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -21,7 +19,7 @@ services:
      CONTAINERS: ${ALLOW_CONTAINERS:-1}
      POST: ${ALLOW_POST:-1}
    volumes:
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  pg:
    image: postgres:14-alpine

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
@@ -34,7 +32,7 @@ services:
      BACKUP_SKIP_BACKENDS_FROM_PRUNE: 's3'
    volumes:
      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ${LOCAL_DIR:-./local}:/archive

  offen:

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
@@ -32,7 +30,7 @@ services:
      BACKUP_PRUNING_PREFIX: test
    volumes:
      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

-version: '3.8'
-
services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
@@ -37,7 +35,7 @@ services:
      BACKUP_PRUNING_LEEWAY: 5s
    volumes:
      - pg_data:/backup/pg_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro
    secrets:
      - minio_root_user
      - minio_root_password

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

-version: '3.8'
-
services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
@@ -31,7 +29,7 @@ services:
      BACKUP_PRUNING_LEEWAY: 5s
    volumes:
      - pg_data:/backup/pg_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  ssh:
    image: linuxserver/openssh-server:version-8.6_p1-r3
@@ -32,7 +30,7 @@ services:
    volumes:
      - ${KEY_DIR:-.}/id_rsa:/root/.ssh/id_rsa
      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

-version: '3.8'
-
services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
@@ -37,7 +35,7 @@ services:
      BACKUP_PRUNING_LEEWAY: 5s
    volumes:
      - pg_data:/backup/pg_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest

test/tar/docker-compose.yml | 21 (new file)
@@ -0,0 +1,21 @@
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_FILENAME: test.{{ .Extension }}
      BACKUP_COMPRESSION: none
    volumes:
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ${LOCAL_DIR:-./local}:/archive

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
test/tar/run.sh | 25 (new, executable)
@@ -0,0 +1,25 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

export LOCAL_DIR=$(mktemp -d)

docker compose up -d --quiet-pull
sleep 5

docker compose exec backup backup

sleep 5

expect_running_containers "2"

tmp_dir=$(mktemp -d)
tar -xvf "$LOCAL_DIR/test.tar" -C $tmp_dir
if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
  fail "Could not find expected file in untared archive."
fi
pass "Expected file was found."
@@ -1,5 +1,3 @@
-version: '2.4'
-
services:
  alpine:
    image: alpine:3.17.3
@@ -22,7 +20,7 @@ services:
    volumes:
      - ${LOCAL_DIR:-./local}:/archive
      - app_data:/backup/data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

volumes:
  app_data:

@@ -1,5 +1,3 @@
-version: '3'
-
services:
  webdav:
    image: bytemark/webdav:2.4
@@ -30,7 +28,7 @@ services:
      WEBDAV_PASSWORD: test
    volumes:
      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
+      - /var/run/docker.sock:/var/run/docker.sock:ro

  offen:
    image: offen/offen:latest

@@ -23,7 +23,7 @@ docker run --rm -q \
  --network test_network \
  -v app_data:/backup/app_data \
  -v $LOCAL_DIR:/archive \
-  -v /var/run/docker.sock:/var/run/docker.sock \
+  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  --env BACKUP_COMPRESSION=zst \
  --env BACKUP_FILENAME='test.{{ .Extension }}' \
  --entrypoint backup \