Mirror of https://github.com/offen/docker-volume-backup.git (synced 2025-12-05 17:18:02 +01:00)

Compare commits: v2.39.1...retention- (90 commits)
Commits in this comparison:

8a853a5b03, 378217e517, 23756074f9, 40b12b9d07, e628f09122, 7340e00dab, 958585336a, 68b7e4d678, 857e4fc605, 8d26194809, 3063288d1e, 02fdfb363c, 2ee23a9384, 16be0c0217, 4799795f0a, 49b8d2f8d8, e4beef200a, e75ab8bdd8, a4145352f9, 615256cda9, 40c4f11d70, 2685571c58, 04ad0777e0, 50e41eac02, 94e59a102e, 964a5e0342, 2363c3c9cb, 6bc66db833, de40eae4de, 731421e359, d46918b13a, 2fb63059b3, e0fcbd27e5, f4884bf190, 52787a1e42, 6e08ae7c39, 0183db831b, f481fda848, f4cf4173e6, 681983608f, 45335ffb67, 01a595607d, 119391e8df, dd5f7f5b66, c54a5bef5f, 8fac9608ff, 3ee40b6422, 8b5c9a494f, 44ad3bbda2, 74e065cbb9, 8a64da4b0b, f97ce11734, 336e12f874, 016c6c8307, e22f317fbb, e04bd2f066, c4eeaad813, 5840f1c5dc, d71b7304c2, fbc7f85d9f, 2af5bdf4d9, 631ca3e07d, 3d35d7c00e, 954bde73fb, ab46e96706, ab4ce94534, e4170addb6, b8410bbdc5, 24e1341589, 3d0286472b, bb11ae035b, 9209037ed9, 2e73dea4f7, 7dc3ae17e7, 9d5ea718a0, 272495ae7d, 8beb28d4f8, 0ec2e68076, b85afa6008, 4cb47a4818, 9b5ba8958d, 0327701e2d, 58f26ba004, f62ef6e05a, 40924434e4, e613f6046f, 292d47eb19, 7637975e3f, c47a14c53a, 9f795761d6
**.github/FUNDING.yml** (vendored, deleted):

````diff
@@ -1,3 +0,0 @@
-github: offen
-patreon: offen
-
````
**.github/workflows/deploy-docs.yml** (vendored):

````diff
@@ -39,7 +39,7 @@ jobs:
         env:
           JEKYLL_ENV: production
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v1
+        uses: actions/upload-pages-artifact@v3
         with:
          path: 'docs/_site/'

@@ -52,4 +52,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@v1
+        uses: actions/deploy-pages@v4
````
**.github/workflows/golangci-lint.yml** (vendored):

````diff
@@ -18,7 +18,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
         with:
-          go-version: '1.22'
+          go-version: '1.23'
          cache: false
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v3

@@ -26,7 +26,7 @@ jobs:
          # Require: The version of golangci-lint to use.
          # When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version.
          # When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit.
-          version: v1.54
+          version: v1.60

          # Optional: working directory, useful for monorepos
          # working-directory: somedir
````
**.github/workflows/unit.yml** (vendored):

````diff
@@ -14,7 +14,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v4
         with:
-          go-version: '1.22.x'
+          go-version: '1.23.x'
       - name: Install dependencies
         run: go mod download
       - name: Test with the Go CLI
````
golangci-lint configuration file:

````diff
@@ -5,4 +5,5 @@ linters:
   - staticcheck
   - govet
 output:
-  format: github-actions
+  formats:
+    - format: colored-line-number
````
Dockerfile:

````diff
@@ -1,7 +1,7 @@
 # Copyright 2022 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0

-FROM golang:1.22-alpine as builder
+FROM golang:1.23-alpine as builder

 WORKDIR /app
 COPY . .

@@ -9,7 +9,7 @@ RUN go mod download
 WORKDIR /app/cmd/backup
 RUN go build -o backup .

-FROM alpine:3.19
+FROM alpine:3.21

 WORKDIR /root

````
README:

````diff
@@ -76,7 +76,7 @@ docker run --rm \
   offen/docker-volume-backup:v2
 ```

-Alternatively, pass a `--env-file` in order to use a full config as described below.
+Alternatively, pass a `--env-file` in order to use a full config as described [in the docs](https://offen.github.io/docker-volume-backup/reference/).

 ---

````
Archive creation (`createArchive` / `compress`), adding a `none` compression pass-through:

````diff
@@ -22,8 +22,7 @@ import (
 )

 func createArchive(files []string, inputFilePath, outputFilePath string, compression string, compressionConcurrency int) error {
-	inputFilePath = stripTrailingSlashes(inputFilePath)
-	inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
+	_, outputFilePath, err := makeAbsolute(stripTrailingSlashes(inputFilePath), outputFilePath)
 	if err != nil {
 		return errwrap.Wrap(err, "error transposing given file paths")
 	}

@@ -31,7 +30,7 @@ func createArchive(files []string, inputFilePath, outputFilePath string, compres
 		return errwrap.Wrap(err, "error creating output file path")
 	}

-	if err := compress(files, outputFilePath, filepath.Dir(inputFilePath), compression, compressionConcurrency); err != nil {
+	if err := compress(files, outputFilePath, compression, compressionConcurrency); err != nil {
 		return errwrap.Wrap(err, "error creating archive")
 	}

@@ -55,7 +54,7 @@ func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error)
 	return inputFilePath, outputFilePath, err
 }

-func compress(paths []string, outFilePath, subPath string, algo string, concurrency int) error {
+func compress(paths []string, outFilePath, algo string, concurrency int) error {
 	file, err := os.Create(outFilePath)
 	if err != nil {
 		return errwrap.Wrap(err, "error creating out file")

@@ -94,6 +93,8 @@ func compress(paths []string, outFilePath, subPath string, algo string, concurre
 func getCompressionWriter(file *os.File, algo string, concurrency int) (io.WriteCloser, error) {
 	switch algo {
+	case "none":
+		return &passThroughWriteCloser{file}, nil
 	case "gz":
 		w, err := pgzip.NewWriterLevel(file, 5)
 		if err != nil {

@@ -166,3 +167,15 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {

 	return nil
 }
+
+type passThroughWriteCloser struct {
+	target io.WriteCloser
+}
+
+func (p *passThroughWriteCloser) Write(b []byte) (int, error) {
+	return p.target.Write(b)
+}
+
+func (p *passThroughWriteCloser) Close() error {
+	return nil
+}
````
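For context on the `none` branch: `getCompressionWriter` must return an `io.WriteCloser` for every algorithm, so the uncompressed path wraps the output file in a writer whose `Close` is a no-op (the file itself is closed elsewhere). A standalone sketch of the same pattern, with illustrative names:

```go
package main

import (
	"io"
	"os"
)

// passThrough satisfies io.WriteCloser without transforming bytes, so a
// tar pipeline can target it exactly like a gzip or zstd writer.
type passThrough struct {
	target io.WriteCloser
}

func (p *passThrough) Write(b []byte) (int, error) { return p.target.Write(b) }

// Close is a no-op: the underlying file is closed by the caller.
func (p *passThrough) Close() error { return nil }

func main() {
	f, err := os.Create("archive.tar") // illustrative output path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var w io.WriteCloser = &passThrough{f} // the algo == "none" case
	if _, err := w.Write([]byte("tar stream bytes would go here")); err != nil {
		panic(err)
	}
	_ = w.Close() // no-op; f is still closed by the deferred f.Close()
}
```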
Cron scheduling (`command.schedule`):

````diff
@@ -131,12 +131,8 @@ func (c *command) schedule(strategy configStrategy) error {
 			c.logger.Warn(
 				fmt.Sprintf("Scheduled cron expression %s will never run, is this intentional?", config.BackupCronExpression),
 			)
-			if err != nil {
-				return errwrap.Wrap(err, "error scheduling")
-			}
-			c.schedules = append(c.schedules, id)
 		}
+		c.schedules = append(c.schedules, id)
 	}

 	return nil
````
Configuration struct (`Config`):

````diff
@@ -38,6 +38,7 @@ type Config struct {
 	BackupArchive            string        `split_words:"true" default:"/archive"`
 	BackupCronExpression     string        `split_words:"true" default:"@daily"`
 	BackupRetentionDays      int32         `split_words:"true" default:"-1"`
+	BackupRetentionPeriod    time.Duration `split_words:"true"`
 	BackupPruningLeeway      time.Duration `split_words:"true" default:"1m"`
 	BackupPruningPrefix      string        `split_words:"true"`
 	BackupStopContainerLabel string        `split_words:"true"`

@@ -47,6 +48,9 @@ type Config struct {
 	BackupExcludeRegexp         RegexpDecoder `split_words:"true"`
 	BackupSkipBackendsFromPrune []string      `split_words:"true"`
 	GpgPassphrase               string        `split_words:"true"`
+	GpgPublicKeyRing            string        `split_words:"true"`
+	AgePassphrase               string        `split_words:"true"`
+	AgePublicKeys               []string      `split_words:"true"`
 	NotificationURLs            []string      `envconfig:"NOTIFICATION_URLS"`
 	NotificationLevel           string        `split_words:"true" default:"error"`
 	EmailNotificationRecipient  string        `split_words:"true"`

@@ -76,6 +80,7 @@ type Config struct {
 	AzureStorageContainerName string `split_words:"true"`
 	AzureStoragePath          string `split_words:"true"`
 	AzureStorageEndpoint      string `split_words:"true" default:"https://{{ .AccountName }}.blob.core.windows.net/"`
+	AzureStorageAccessTier    string `split_words:"true"`
 	DropboxEndpoint           string `split_words:"true" default:"https://api.dropbox.com/"`
 	DropboxOAuth2Endpoint     string `envconfig:"DROPBOX_OAUTH2_ENDPOINT" default:"https://api.dropbox.com/"`
 	DropboxRefreshToken       string `split_words:"true"`

@@ -91,7 +96,7 @@ type CompressionType string

 func (c *CompressionType) Decode(v string) error {
 	switch v {
-	case "gz", "zst":
+	case "none", "gz", "zst":
 		*c = CompressionType(v)
 		return nil
 	default:
````
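The new fields are populated from the environment via struct tags. A sketch of how that decoding behaves, assuming offen/envconfig keeps the upstream kelseyhightower/envconfig API it is forked from (the `split_words` and `default` tags above follow those conventions):

```go
package main

import (
	"fmt"
	"time"

	"github.com/offen/envconfig" // assumed to mirror the kelseyhightower/envconfig API
)

type Config struct {
	// split_words maps BackupRetentionPeriod to BACKUP_RETENTION_PERIOD;
	// time.Duration fields accept Go duration strings like "168h".
	BackupRetentionPeriod time.Duration `split_words:"true"`
	// Slice fields are read from a comma-separated value.
	AgePublicKeys []string `split_words:"true"`
	GpgPassphrase string   `split_words:"true"`
}

func main() {
	// e.g. BACKUP_RETENTION_PERIOD=168h AGE_PUBLIC_KEYS=age1aaa...,age1bbb...
	var c Config
	if err := envconfig.Process("", &c); err != nil {
		panic(err)
	}
	fmt.Println(c.BackupRetentionPeriod, c.AgePublicKeys)
}
```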
Archive encryption, rewritten to support symmetric/asymmetric GPG and age behind a shared `doEncrypt` helper:

````diff
@@ -4,61 +4,209 @@
 package main

 import (
+	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"os"
 	"path"

+	"filippo.io/age"
+	"github.com/ProtonMail/go-crypto/openpgp/armor"
 	openpgp "github.com/ProtonMail/go-crypto/openpgp/v2"
 	"github.com/offen/docker-volume-backup/internal/errwrap"
 )

-// encryptArchive encrypts the backup file using PGP and the configured passphrase.
-// In case no passphrase is given it returns early, leaving the backup file
+func countTrue(b ...bool) int {
+	c := int(0)
+	for _, v := range b {
+		if v {
+			c++
+		}
+	}
+	return c
+}
+
+// encryptArchive encrypts the backup file using PGP and the configured passphrase or publickey(s).
+// In case no passphrase or publickey is given it returns early, leaving the backup file
 // untouched.
 func (s *script) encryptArchive() error {
-	if s.c.GpgPassphrase == "" {
+	useGPGSymmetric := s.c.GpgPassphrase != ""
+	useGPGAsymmetric := s.c.GpgPublicKeyRing != ""
+	useAgeSymmetric := s.c.AgePassphrase != ""
+	useAgeAsymmetric := len(s.c.AgePublicKeys) > 0
+	switch nconfigured := countTrue(
+		useGPGSymmetric,
+		useGPGAsymmetric,
+		useAgeSymmetric,
+		useAgeAsymmetric,
+	); nconfigured {
+	case 0:
 		return nil
+	case 1:
+		// ok!
+	default:
+		return fmt.Errorf(
+			"error in selecting archive encryption method: expected 0 or 1 to be configured, %d methods are configured",
+			nconfigured,
+		)
 	}

-	gpgFile := fmt.Sprintf("%s.gpg", s.file)
+	if useGPGSymmetric {
+		return s.encryptWithGPGSymmetric()
+	} else if useGPGAsymmetric {
+		return s.encryptWithGPGAsymmetric()
+	} else if useAgeSymmetric || useAgeAsymmetric {
+		ar, err := s.getConfiguredAgeRecipients()
+		if err != nil {
+			return errwrap.Wrap(err, "failed to get configured age recipients")
+		}
+		return s.encryptWithAge(ar)
+	}
+	return nil
+}
+
+func (s *script) getConfiguredAgeRecipients() ([]age.Recipient, error) {
+	if s.c.AgePassphrase == "" && len(s.c.AgePublicKeys) == 0 {
+		return nil, fmt.Errorf("no age recipients configured")
+	}
+	recipients := []age.Recipient{}
+	if len(s.c.AgePublicKeys) > 0 {
+		for _, pk := range s.c.AgePublicKeys {
+			pkr, err := age.ParseX25519Recipient(pk)
+			if err != nil {
+				return nil, errwrap.Wrap(err, "failed to parse age public key")
+			}
+			recipients = append(recipients, pkr)
+		}
+	}
+	if s.c.AgePassphrase != "" {
+		if len(recipients) != 0 {
+			return nil, fmt.Errorf("age encryption must only be enabled via passphrase or public key, not both")
+		}
+
+		r, err := age.NewScryptRecipient(s.c.AgePassphrase)
+		if err != nil {
+			return nil, errwrap.Wrap(err, "failed to create scrypt identity from age passphrase")
+		}
+		recipients = append(recipients, r)
+	}
+	return recipients, nil
+}
+
+func (s *script) encryptWithAge(rec []age.Recipient) error {
+	return s.doEncrypt("age", func(ciphertextWriter io.Writer) (io.WriteCloser, error) {
+		return age.Encrypt(ciphertextWriter, rec...)
+	})
+}
+
+func (s *script) encryptWithGPGSymmetric() error {
+	return s.doEncrypt("gpg", func(ciphertextWriter io.Writer) (io.WriteCloser, error) {
+		_, name := path.Split(s.file)
+		return openpgp.SymmetricallyEncrypt(ciphertextWriter, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
+			FileName: name,
+		}, nil)
+	})
+}
+
+type closeAllWriter struct {
+	io.Writer
+	closers []io.Closer
+}
+
+func (c *closeAllWriter) Close() (err error) {
+	for _, cl := range c.closers {
+		err = errors.Join(err, cl.Close())
+	}
+	return
+}
+
+var _ io.WriteCloser = (*closeAllWriter)(nil)
+
+func (s *script) encryptWithGPGAsymmetric() error {
+	return s.doEncrypt("gpg", func(ciphertextWriter io.Writer) (_ io.WriteCloser, outerr error) {
+		entityList, err := openpgp.ReadArmoredKeyRing(bytes.NewReader([]byte(s.c.GpgPublicKeyRing)))
+		if err != nil {
+			return nil, errwrap.Wrap(err, "error parsing armored keyring")
+		}
+
+		armoredWriter, err := armor.Encode(ciphertextWriter, "PGP MESSAGE", nil)
+		if err != nil {
+			return nil, errwrap.Wrap(err, "error preparing encryption")
+		}
+		defer func() {
+			if outerr != nil {
+				_ = armoredWriter.Close()
+			}
+		}()
+
+		_, name := path.Split(s.file)
+		encWriter, err := openpgp.Encrypt(armoredWriter, entityList, nil, nil, &openpgp.FileHints{
+			FileName: name,
+		}, nil)
+		if err != nil {
+			return nil, err
+		}
+		return &closeAllWriter{
+			Writer:  encWriter,
+			closers: []io.Closer{encWriter, armoredWriter},
+		}, nil
+	})
+}
+
+func (s *script) doEncrypt(
+	extension string,
+	encryptor func(ciphertextWriter io.Writer) (io.WriteCloser, error),
+) (outerr error) {
+	encFile := fmt.Sprintf("%s.%s", s.file, extension)
 	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := remove(gpgFile); err != nil {
-			return errwrap.Wrap(err, "error removing gpg file")
+		if err := remove(encFile); err != nil {
+			return errwrap.Wrap(err, "error removing encrypted file")
 		}
 		s.logger.Info(
-			fmt.Sprintf("Removed GPG file `%s`.", gpgFile),
+			fmt.Sprintf("Removed encrypted file `%s`.", encFile),
 		)
 		return nil
 	})

-	outFile, err := os.Create(gpgFile)
+	outFile, err := os.Create(encFile)
 	if err != nil {
 		return errwrap.Wrap(err, "error opening out file")
 	}
-	defer outFile.Close()
+	defer func() {
+		if err := outFile.Close(); err != nil {
+			outerr = errors.Join(outerr, errwrap.Wrap(err, "error closing out file"))
+		}
+	}()

-	_, name := path.Split(s.file)
-	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
-		FileName: name,
-	}, nil)
+	dst, err := encryptor(outFile)
 	if err != nil {
 		return errwrap.Wrap(err, "error encrypting backup file")
 	}
-	defer dst.Close()
+	defer func() {
+		if err := dst.Close(); err != nil {
+			outerr = errors.Join(outerr, errwrap.Wrap(err, "error closing encrypted backup file"))
+		}
+	}()

 	src, err := os.Open(s.file)
 	if err != nil {
-		return errwrap.Wrap(err, fmt.Sprintf("error opening backup file `%s`", s.file))
+		return errwrap.Wrap(err, fmt.Sprintf("error opening backup file %q", s.file))
 	}
+	defer func() {
+		if err := src.Close(); err != nil {
+			outerr = errors.Join(outerr, errwrap.Wrap(err, "error closing backup file"))
+		}
+	}()

 	if _, err := io.Copy(dst, src); err != nil {
 		return errwrap.Wrap(err, "error writing ciphertext to file")
 	}

-	s.file = gpgFile
+	s.file = encFile
 	s.logger.Info(
-		fmt.Sprintf("Encrypted backup using given passphrase, saving as `%s`.", s.file),
+		fmt.Sprintf("Encrypted backup using %q, saving as %q", extension, s.file),
 	)
-	return nil
+
+	return
 }
````
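The refactor reduces every method to the same shape: wrap the output in a ciphertext `io.WriteCloser`, `io.Copy` the plaintext through it, and close in the right order. A self-contained round trip using the `filippo.io/age` calls that appear in the diff; buffer and passphrase are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"filippo.io/age"
)

func main() {
	passphrase := "correct horse battery staple"

	// Encrypt: age.Encrypt returns the ciphertext writer that
	// doEncrypt's encryptor callback hands back to io.Copy.
	r, err := age.NewScryptRecipient(passphrase)
	if err != nil {
		panic(err)
	}
	var ciphertext bytes.Buffer
	w, err := age.Encrypt(&ciphertext, r)
	if err != nil {
		panic(err)
	}
	if _, err := io.WriteString(w, "backup archive bytes"); err != nil {
		panic(err)
	}
	// Closing flushes the final chunk; skipping this truncates the output,
	// which is why doEncrypt defers dst.Close() and joins its error.
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Decrypt with the matching scrypt identity.
	id, err := age.NewScryptIdentity(passphrase)
	if err != nil {
		panic(err)
	}
	out, err := age.Decrypt(&ciphertext, id)
	if err != nil {
		panic(err)
	}
	plaintext, _ := io.ReadAll(out)
	fmt.Println(string(plaintext))
}
```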
Container exec (`script.exec` / `runLabeledCommands`), migrating to the newer Docker API types and guarding argv parsing:

````diff
@@ -16,7 +16,7 @@ import (
 	"strings"

 	"github.com/cosiner/argv"
-	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/offen/docker-volume-backup/internal/errwrap"

@@ -24,11 +24,19 @@ import (
 )

 func (s *script) exec(containerRef string, command string, user string) ([]byte, []byte, error) {
-	args, _ := argv.Argv(command, nil, nil)
+	args, err := argv.Argv(command, nil, nil)
+	if err != nil {
+		return nil, nil, errwrap.Wrap(err, fmt.Sprintf("error parsing argv from '%s'", command))
+	}
+	if len(args) == 0 {
+		return nil, nil, errwrap.Wrap(nil, "received unexpected empty command")
+	}
+
 	commandEnv := []string{
 		fmt.Sprintf("COMMAND_RUNTIME_ARCHIVE_FILEPATH=%s", s.file),
 	}
-	execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, types.ExecConfig{
+	execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, container.ExecOptions{
 		Cmd:          args[0],
 		AttachStdin:  true,
 		AttachStderr: true,

@@ -39,7 +47,7 @@ func (s *script) exec(containerRef string, command string, user string) ([]byte,
 		return nil, nil, errwrap.Wrap(err, "error creating container exec")
 	}

-	resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, types.ExecStartCheck{})
+	resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, container.ExecStartOptions{})
 	if err != nil {
 		return nil, nil, errwrap.Wrap(err, "error attaching container exec")
 	}

@@ -96,7 +104,7 @@ func (s *script) runLabeledCommands(label string) error {
 			Value: fmt.Sprintf("docker-volume-backup.exec-label=%s", s.c.ExecLabel),
 		})
 	}
-	containersWithCommand, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+	containersWithCommand, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 		Filters: filters.NewArgs(f...),
 	})
 	if err != nil {

@@ -109,7 +117,7 @@ func (s *script) runLabeledCommands(label string) error {
 			Key:   "label",
 			Value: "docker-volume-backup.exec-pre",
 		}
-		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+		deprecatedContainers, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 			Filters: filters.NewArgs(f...),
 		})
 		if err != nil {

@@ -126,7 +134,7 @@ func (s *script) runLabeledCommands(label string) error {
 			Key:   "label",
 			Value: "docker-volume-backup.exec-post",
 		}
-		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+		deprecatedContainers, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 			Filters: filters.NewArgs(f...),
 		})
 		if err != nil {
````
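The argv change closes two failure modes that were previously swallowed: a malformed command string and an empty parse result. A sketch reusing the exact call shape from the diff (`argv.Argv(command, nil, nil)` returns one argument vector per shell pipeline stage); the sample command string is illustrative:

```go
package main

import (
	"fmt"

	"github.com/cosiner/argv"
)

func main() {
	command := `/bin/sh -c 'mariadb-dump -psecret --all-databases > /tmp/dumps/dump.sql'`

	// Same call shape as the diff: nil environment lookup, nil command executor.
	args, err := argv.Argv(command, nil, nil)
	if err != nil {
		panic(fmt.Sprintf("error parsing argv from '%s': %v", command, err))
	}
	if len(args) == 0 {
		panic("received unexpected empty command")
	}
	// args[0] is the first (and here only) pipeline stage; the diff
	// passes it as the Cmd of the container exec.
	fmt.Printf("%q\n", args[0])
}
```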
Pruning (`script.pruneBackups`), adding `BACKUP_RETENTION_PERIOD` and deprecating `BACKUP_RETENTION_DAYS`:

````diff
@@ -17,11 +17,18 @@ import (
 // the given configuration. In case the given configuration would delete all
 // backups, it does nothing instead and logs a warning.
 func (s *script) pruneBackups() error {
-	if s.c.BackupRetentionDays < 0 {
+	if s.c.BackupRetentionDays < 0 && s.c.BackupRetentionPeriod == 0 {
 		return nil
 	}

-	deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)
+	var deadline time.Time
+	if s.c.BackupRetentionPeriod != 0 {
+		deadline = time.Now().Add(-s.c.BackupRetentionPeriod)
+	} else {
+		s.logger.Warn("Using BACKUP_RETENTION_DAYS has been deprecated and will be removed in the next major version. Please use BACKUP_RETENTION_PERIOD instead.")
+		deadline = time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays))
+	}
+	deadline = deadline.Add(s.c.BackupPruningLeeway)

 	eg := errgroup.Group{}
 	for _, backend := range s.storages {
````
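Both branches land on a deadline in the past, extended by the pruning leeway; a sketch showing that `168h` reproduces the old seven-day behavior (up to daylight-saving shifts):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()
	leeway := time.Minute // BACKUP_PRUNING_LEEWAY default "1m"

	// New style: an explicit Go duration.
	period, err := time.ParseDuration("168h") // 7 * 24h
	if err != nil {
		panic(err)
	}
	byPeriod := now.Add(-period).Add(leeway)

	// Deprecated style: whole calendar days.
	byDays := now.AddDate(0, 0, -7).Add(leeway)

	// Equal except when a DST transition falls inside the window, where
	// AddDate follows the calendar while Add follows the wall clock.
	fmt.Println(byPeriod.Equal(byDays))
}
```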
Script initialization (`script.init`): `tar`-only extension for `none` compression, the Azure access tier, and a mutual-exclusion check for the retention settings:

````diff
@@ -86,7 +86,12 @@ func (s *script) init() error {

 	var bf bytes.Buffer
 	if tErr := tmplFileName.Execute(&bf, map[string]string{
-		"Extension": fmt.Sprintf("tar.%s", s.c.BackupCompression),
+		"Extension": func() string {
+			if s.c.BackupCompression == "none" {
+				return "tar"
+			}
+			return fmt.Sprintf("tar.%s", s.c.BackupCompression)
+		}(),
 	}); tErr != nil {
 		return errwrap.Wrap(tErr, "error executing backup file extension template")
 	}

@@ -194,6 +199,7 @@ func (s *script) init() error {
 			Endpoint:         s.c.AzureStorageEndpoint,
 			RemotePath:       s.c.AzureStoragePath,
 			ConnectionString: s.c.AzureStorageConnectionString,
+			AccessTier:       s.c.AzureStorageAccessTier,
 		}
 		azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
 		if err != nil {

@@ -219,6 +225,10 @@ func (s *script) init() error {
 		s.storages = append(s.storages, dropboxBackend)
 	}

+	if s.c.BackupRetentionDays > -1 && s.c.BackupRetentionPeriod > 0 {
+		return errwrap.Wrap(nil, "both BACKUP_RETENTION_DAYS and BACKUP_RETENTION_PERIOD were configured, which are mutually exclusive")
+	}
+
 	if s.c.EmailNotificationRecipient != "" {
 		emailURL := fmt.Sprintf(
 			"smtp://%s:%s@%s:%d/?from=%s&to=%s",
````
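The anonymous function above decides the `{{ .Extension }}` value fed into the filename template; a standalone sketch of the expansion (the filename template is illustrative, the real one comes from `BACKUP_FILENAME`):

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// extensionFor mirrors the inline function from the diff.
func extensionFor(compression string) string {
	if compression == "none" {
		return "tar"
	}
	return fmt.Sprintf("tar.%s", compression)
}

func main() {
	tmpl := template.Must(template.New("name").Parse("backup-2021-08-29T04-00-00.{{ .Extension }}"))
	for _, c := range []string{"gz", "zst", "none"} {
		var bf bytes.Buffer
		if err := tmpl.Execute(&bf, map[string]string{"Extension": extensionFor(c)}); err != nil {
			panic(err)
		}
		fmt.Println(bf.String()) // ...tar.gz, ...tar.zst, ...tar
	}
}
```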
Stopping and restarting containers/services, migrated to `container.ListOptions` and `system.Info`:

````diff
@@ -14,9 +14,11 @@ import (

 	"github.com/docker/cli/cli/command/service/progress"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
 	ctr "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/api/types/system"
 	"github.com/docker/docker/client"
 	"github.com/offen/docker-volume-backup/internal/errwrap"
 )

@@ -65,7 +67,7 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i
 				),
 			)
 		case <-poll.C:
-			containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
+			containers, err := cli.ContainerList(context.Background(), container.ListOptions{
 				Filters: filters.NewArgs(filters.KeyValuePair{
 					Key:   "label",
 					Value: fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID),

@@ -82,7 +84,7 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i
 }

 func isSwarm(c interface {
-	Info(context.Context) (types.Info, error)
+	Info(context.Context) (system.Info, error)
 }) (bool, error) {
 	info, err := c.Info(context.Background())
 	if err != nil {

@@ -123,11 +125,11 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 		labelValue,
 	)

-	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
+	allContainers, err := s.cli.ContainerList(context.Background(), container.ListOptions{})
 	if err != nil {
 		return noop, errwrap.Wrap(err, "error querying for containers")
 	}
-	containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+	containersToStop, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 		Filters: filters.NewArgs(filters.KeyValuePair{
 			Key:   "label",
 			Value: filterMatchLabel,

@@ -309,7 +311,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 			continue
 		}

-		if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
+		if err := s.cli.ContainerStart(context.Background(), container.ID, ctr.StartOptions{}); err != nil {
 			restartErrors = append(restartErrors, err)
 		}
 	}
````
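Narrowing `isSwarm`'s parameter to the small `Info` interface (now returning `system.Info`) is what lets the test file below substitute a mock for `*client.Client`. A sketch of that pattern; `isSwarm`'s real body is not part of this diff, so the comparison against `swarm.LocalNodeStateActive` is an assumption based on the test cases that follow:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/system"
)

// infoClient is the narrow interface the diff gives isSwarm.
type infoClient interface {
	Info(context.Context) (system.Info, error)
}

// fakeClient stands in for *client.Client, like the test's mockInfoClient.
type fakeClient struct{ info system.Info }

func (f *fakeClient) Info(context.Context) (system.Info, error) { return f.info, nil }

func isSwarm(c infoClient) (bool, error) {
	info, err := c.Info(context.Background())
	if err != nil {
		return false, err
	}
	return info.Swarm.LocalNodeState == swarm.LocalNodeStateActive, nil
}

func main() {
	ok, _ := isSwarm(&fakeClient{info: system.Info{
		Swarm: swarm.Info{LocalNodeState: swarm.LocalNodeStateActive},
	}})
	fmt.Println(ok) // true
}
```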
Swarm detection tests, with the mock updated to `system.Info`:

````diff
@@ -5,16 +5,16 @@ import (
 	"errors"
 	"testing"

-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/api/types/system"
 )

 type mockInfoClient struct {
-	result types.Info
+	result system.Info
 	err    error
 }

-func (m *mockInfoClient) Info(context.Context) (types.Info, error) {
+func (m *mockInfoClient) Info(context.Context) (system.Info, error) {
 	return m.result, m.err
 }

@@ -28,7 +28,7 @@ func TestIsSwarm(t *testing.T) {
 		{
 			"swarm",
 			&mockInfoClient{
-				result: types.Info{
+				result: system.Info{
 					Swarm: swarm.Info{
 						LocalNodeState: swarm.LocalNodeStateActive,
 					},

@@ -40,7 +40,7 @@ func TestIsSwarm(t *testing.T) {
 		{
 			"compose",
 			&mockInfoClient{
-				result: types.Info{
+				result: system.Info{
 					Swarm: swarm.Info{
 						LocalNodeState: swarm.LocalNodeStateInactive,
 					},

@@ -52,7 +52,7 @@ func TestIsSwarm(t *testing.T) {
 		{
 			"balena",
 			&mockInfoClient{
-				result: types.Info{
+				result: system.Info{
 					Swarm: swarm.Info{
 						LocalNodeState: "",
 					},
````
Gemfile.lock:

````diff
@@ -59,7 +59,7 @@ GEM
     rb-fsevent (0.11.2)
     rb-inotify (0.10.1)
       ffi (~> 1.0)
-    rexml (3.2.6)
+    rexml (3.3.9)
     rouge (3.30.0)
     safe_yaml (1.0.5)
     sassc (2.4.0)
````
Docs, "Automatically prune old backups":

````diff
@@ -7,7 +7,8 @@ nav_order: 3

 # Automatically prune old backups

-When `BACKUP_RETENTION_DAYS` is configured, the command will check if there are any archives in the remote storage backend(s) or local archive that are older than the given retention value and rotate these backups away.
+When `BACKUP_RETENTION_PERIOD` is configured, the command will check if there are any archives in the remote storage backend(s) or local archive that are older than the given retention value and rotate these backups away.
+The value is a duration as per Go's [`time.ParseDuration`][duration].

 {: .note }
 Be aware that this mechanism looks at __all files in the target bucket or archive__, which means that other files that are older than the given deadline are deleted as well.

@@ -23,7 +24,7 @@ services:
     environment:
       BACKUP_FILENAME: backup-%Y-%m-%dT%H-%M-%S.tar.gz
       BACKUP_PRUNING_PREFIX: backup-
-      BACKUP_RETENTION_DAYS: '7'
+      BACKUP_RETENTION_PERIOD: '168h'
     volumes:
       - ${HOME}/backups:/archive
       - data:/backup/my-app-backup:ro

@@ -32,3 +33,5 @@ services:
 volumes:
   data:
 ```
+
+[duration]: https://pkg.go.dev/time#ParseDuration
````
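Since `time.ParseDuration` accepts no day unit, retention windows are typically expressed in hours; a quick sketch of common equivalents:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Valid units are ns, us, ms, s, m, h; there is no "d".
	for _, v := range []string{"24h", "168h", "720h"} {
		d, err := time.ParseDuration(v)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-5s = %.0f days\n", v, d.Hours()/24)
	}
}
```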
Docs, "Encrypt backups using GPG" (now a redirect to the combined encryption page):

````diff
@@ -3,15 +3,7 @@ title: Encrypt backups using GPG
 layout: default
 parent: How Tos
 nav_order: 7
+nav_exclude: true
 ---

-# Encrypt backups using GPG
+See: [Encrypt Backups](encrypt-backups)

-The image supports encrypting backups using GPG out of the box.
-In case a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.gpg` file instead.
-
-Assuming you have `gpg` installed, you can decrypt such a backup using (your OS will prompt for the passphrase before decryption can happen):
-
-```console
-gpg -o backup.tar.gz -d backup.tar.gz.gpg
-```
````
**docs/how-tos/encrypt-backups.md** (new file, 32 lines):

````diff
@@ -0,0 +1,32 @@
+---
+title: Encrypting backups
+layout: default
+parent: How Tos
+nav_order: 7
+---
+
+# Encrypting backups
+
+The image supports encrypting backups using one of two available methods: **GPG** or **[age](https://age-encryption.org/)**
+
+## Using GPG encryption
+
+In case a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY_RING` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.gpg` file instead.
+
+Assuming you have `gpg` installed, you can decrypt such a backup using (your OS will prompt for the passphrase before decryption can happen):
+
+```console
+gpg -o backup.tar.gz -d backup.tar.gz.gpg
+```
+
+## Using age encryption
+
+{: .note }
+Even though the `age` CLI tools supports encryption using SSH keys, this is not supported by this tool.
+`AGE_PUBLIC_KEYS` currently expects `age` keys to be given.
+
+age allows backups to be encrypted with either a symmetric key (password) or a public key. One of those options are available for use.
+
+Given `AGE_PASSPHRASE` being provided, the backup archive will be encrypted with the passphrase and saved as a `.age` file instead. Refer to age documentation for how to properly decrypt.
+
+Given `AGE_PUBLIC_KEYS` being provided (allowing multiple by separating each public key with `,`), the backup archive will be encrypted with the provided public keys. It will also result in the archive being saved as a `.age` file.
````
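Supplementing the new doc: a hedged Go sketch of generating a native age X25519 keypair (the form `AGE_PUBLIC_KEYS` expects) and round-tripping a payload; all values are illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"filippo.io/age"
)

func main() {
	// Equivalent of running age-keygen: Recipient() prints as "age1...",
	// which is what AGE_PUBLIC_KEYS takes; the identity stays private
	// with whoever needs to decrypt.
	id, err := age.GenerateX25519Identity()
	if err != nil {
		panic(err)
	}
	fmt.Println("public key:", id.Recipient())

	var ciphertext bytes.Buffer
	w, err := age.Encrypt(&ciphertext, id.Recipient())
	if err != nil {
		panic(err)
	}
	if _, err := io.WriteString(w, "backup.tar.gz bytes"); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flushes the final chunk
		panic(err)
	}

	out, err := age.Decrypt(&ciphertext, id)
	if err != nil {
		panic(err)
	}
	plain, _ := io.ReadAll(out)
	fmt.Println(string(plain))
}
```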
Docs, pre/post commands:

````diff
@@ -9,6 +9,11 @@ parent: How Tos

 In certain scenarios it can be required to run specific commands before and after a backup is taken (e.g. dumping a database).
 When mounting the Docker socket into the `docker-volume-backup` container, you can define pre- and post-commands that will be run in the context of the target container (it is also possible to run commands inside the `docker-volume-backup` container itself using this feature).

+{: .important }
+In a multi-node Swarm setup, commands can currently only be run on the node the `offen/docker-volume-backup` container is running on.
+Labeled containers on other nodes are not visible to the backup command.
+
 Such commands are defined by specifying the command in a `docker-volume-backup.[step]-[pre|post]` label where `step` can be any of the following phases of a backup lifecycle:

 - `archive` (the tar archive is created)
````
Docs, Dropbox setup:

````diff
@@ -33,5 +33,7 @@ Note: Using the "Generated access token" in the app console is not supported, as

 ## Other parameters

-Important: If you chose `App folder` access during the creation of your Dropbox app in step 1 above, you can only write in the app's directory!
-This means, that `DROPBOX_REMOTE_PATH` must start with e.g. `/Apps/YOUR_APP_NAME` or `/Apps/YOUR_APP_NAME/some_sub_dir`
+Important: If you chose `App folder` access during the creation of your Dropbox app in step 1 above, `DROPBOX_REMOTE_PATH` will be a relative path under the App folder!
+(_For example, DROPBOX_REMOTE_PATH=/somedir means the backup file will be uploaded to /Apps/myapp/somedir_)
+On the other hand if you chose `Full Dropbox` access, the value for `DROPBOX_REMOTE_PATH` will represent an absolute path inside your Dropbox storage area.
+(_Still considering the same example above, the backup file will be uploaded to /somedir in your Dropbox root_)
````
Docs, notifications:

````diff
@@ -25,7 +25,7 @@ services:
 Notification backends other than email are also supported.
 Refer to the documentation of [shoutrrr][shoutrrr-docs] to find out about options and configuration.

-[shoutrrr-docs]: https://containrrr.dev/shoutrrr/0.7/services/overview/
+[shoutrrr-docs]: https://containrrr.dev/shoutrrr/v0.8/services/overview/

 {: .note }
 If you also want notifications on successful executions, set `NOTIFICATION_LEVEL` to `info`.
````
Docs, recipes:

````diff
@@ -190,7 +190,7 @@ services:
       DROPBOX_REFRESH_TOKEN: REFRESH_KEY # replace
       DROPBOX_APP_KEY: APP_KEY # replace
       DROPBOX_APP_SECRET: APP_SECRET # replace
-      DROPBOX_REMOTE_PATH: /Apps/my-test-app/some_subdir # replace
+      DROPBOX_REMOTE_PATH: /somedir # replace
     volumes:
       - data:/backup/my-app-backup:ro
       - /var/run/docker.sock:/var/run/docker.sock:ro

@@ -280,7 +280,7 @@ services:
       AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
       BACKUP_FILENAME: backup-%Y-%m-%dT%H-%M-%S.tar.gz
       BACKUP_PRUNING_PREFIX: backup-
-      BACKUP_RETENTION_DAYS: 7
+      BACKUP_RETENTION_PERIOD: 168h
     volumes:
       - data:/backup/my-app-backup:ro
       - /var/run/docker.sock:/var/run/docker.sock:ro

@@ -289,7 +289,7 @@ volumes:
   data:
 ```

-## Encrypting your backups using GPG
+## Encrypting your backups symmetrically using GPG

 ```yml
 version: '3'

@@ -311,7 +311,34 @@ volumes:
   data:
 ```

-## Using mysqldump to prepare the backup
+## Encrypting your backups asymmetrically using GPG
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:v2
+    environment:
+      AWS_S3_BUCKET_NAME: backup-bucket
+      AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
+      AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+      GPG_PUBLIC_KEY_RING: |
+        -----BEGIN PGP PUBLIC KEY BLOCK-----
+
+        D/cIHu6GH/0ghlcUVSbgMg5RRI5QKNNKh04uLAPxr75mKwUg0xPUaWgyyrAChVBi
+        ...
+        -----END PGP PUBLIC KEY BLOCK-----
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+  data:
+```
+
+## Using mariadb-dump/mysqldump to prepare the backup

 ```yml
 version: '3'

@@ -320,7 +347,7 @@ services:
   database:
     image: mariadb:latest
     labels:
-      - docker-volume-backup.archive-pre=/bin/sh -c 'mysqldump -psecret --all-databases > /tmp/dumps/dump.sql'
+      - docker-volume-backup.archive-pre=/bin/sh -c 'mariadb-dump -psecret --all-databases > /tmp/dumps/dump.sql'
     volumes:
       - data:/tmp/dumps
   backup:
````
Docs, configuration reference template:

````diff
@@ -43,8 +43,8 @@ You can populate below template according to your requirements and use it as you
 # BACKUP_CRON_EXPRESSION="0 2 * * *"

 # The compression algorithm used in conjunction with tar.
-# Valid options are: "gz" (Gzip) and "zst" (Zstd).
-# Note that the selection affects the file extension.
+# Valid options are: "gz" (Gzip), "zst" (Zstd) or "none" (tar only).
+# Default is "gz". Note that the selection affects the file extension.

 # BACKUP_COMPRESSION="gz"

@@ -60,7 +60,7 @@ You can populate below template according to your requirements and use it as you
 # will result in the same filename for every backup run, which means previous
 # versions will be overwritten on subsequent runs.
 # Extension can be defined literally or via "{{ .Extension }}" template,
-# in which case it will become either "tar.gz" or "tar.zst" (depending
+# in which case it will become either "tar.gz", "tar.zst" or ".tar" (depending
 # on your BACKUP_COMPRESSION setting).
 # The default results in filenames like: `backup-2021-08-29T04-00-00.tar.gz`.

@@ -269,6 +269,11 @@ You can populate below template according to your requirements and use it as you
 # Note: Use your app's subpath in Dropbox, if it doesn't have global access.
 # Consulte the README for further information.

+# The access tier when using Azure Blob Storage. Possible values are
+# https://github.com/Azure/azure-sdk-for-go/blob/sdk/storage/azblob/v1.3.2/sdk/storage/azblob/internal/generated/zz_constants.go#L14-L30
+
+# AZURE_STORAGE_ACCESS_TIER="Cold"
+
 # DROPBOX_REMOTE_PATH="/my/directory"

 # Number of concurrent chunked uploads for Dropbox.

@@ -307,9 +312,10 @@ You can populate below template according to your requirements and use it as you
 # removal to certain files.

 # Define this value to enable automatic rotation of old backups. The value
-# declares the number of days for which a backup is kept.
+# declares the duration for which a backup is kept. It is formatted as per
+# https://pkg.go.dev/time#ParseDuration, e.g. 1 day turns into `24h`

-# BACKUP_RETENTION_DAYS="7"
+# BACKUP_RETENTION_PERIOD="168h"

 # In case the duration a backup takes fluctuates noticeably in your setup
 # you can adjust this setting to make sure there are no race conditions

@@ -332,10 +338,32 @@ You can populate below template according to your requirements and use it as you

 ########### BACKUP ENCRYPTION

-# Backups can be encrypted using gpg in case a passphrase is given.
+# All of the encryption options are mutually exclusive. Provide a single option
+# for the encryption scheme of your choice.
+
+# Backups can be encrypted symmetrically using gpg in case a passphrase is given.

 # GPG_PASSPHRASE="<xxx>"

+# Backups can be encrypted asymmetrically using gpg in case publickeys are given.
+
+# GPG_PUBLIC_KEY_RING= |
+#-----BEGIN PGP PUBLIC KEY BLOCK-----
+#
+#D/cIHu6GH/0ghlcUVSbgMg5RRI5QKNNKh04uLAPxr75mKwUg0xPUaWgyyrAChVBi
+#...
+#-----END PGP PUBLIC KEY BLOCK-----
+
+# Backups can be encrypted symmetrically using age in case a passphrase is given.
+
+# AGE_PASSPHRASE="<xxx>"
+
+# Backups can be encrypted asymmetrically using age in case publickeys are given.
+# Multiple keys need to be provided as a comma separated list. Right now, this only
+# support passing age keys, with no support for ssh keys.
+
+# AGE_PUBLIC_KEYS="<xxx>"
+
 ########### STOPPING CONTAINERS AND SERVICES DURING BACKUP

 # Containers or services can be stopped by applying a

@@ -378,7 +406,7 @@ You can populate below template according to your requirements and use it as you

 # Notifications (email, Slack, etc.) can be sent out when a backup run finishes.
 # Configuration is provided as a comma-separated list of URLs as consumed
-# by `shoutrrr`: https://containrrr.dev/shoutrrr/0.7/services/overview/
+# by `shoutrrr`: https://containrrr.dev/shoutrrr/v0.8/services/overview/
 # The content of such notifications can be customized. Dedicated documentation
 # on how to do this can be found in the README. When providing multiple URLs or
 # an URL that contains a comma, the values can be URL encoded to avoid ambiguities.
````
83
go.mod
83
go.mod
@@ -1,77 +1,86 @@
|
|||||||
module github.com/offen/docker-volume-backup
|
module github.com/offen/docker-volume-backup
|
||||||
|
|
||||||
go 1.22
|
go 1.23
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
|
filippo.io/age v1.2.1
|
||||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
|
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1
|
||||||
github.com/containrrr/shoutrrr v0.7.1
|
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
|
||||||
|
github.com/containrrr/shoutrrr v0.8.0
|
||||||
github.com/cosiner/argv v0.1.0
|
github.com/cosiner/argv v0.1.0
|
||||||
github.com/docker/cli v24.0.9+incompatible
|
github.com/docker/cli v27.5.1+incompatible
|
||||||
github.com/docker/docker v24.0.7+incompatible
|
github.com/docker/docker v27.1.1+incompatible
|
||||||
github.com/gofrs/flock v0.8.1
|
github.com/gofrs/flock v0.12.1
|
||||||
github.com/joho/godotenv v1.5.1
|
github.com/joho/godotenv v1.5.1
|
||||||
github.com/klauspost/compress v1.17.7
|
github.com/klauspost/compress v1.17.11
|
||||||
github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
|
github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
|
||||||
github.com/minio/minio-go/v7 v7.0.69
|
github.com/minio/minio-go/v7 v7.0.84
|
||||||
github.com/offen/envconfig v1.5.0
|
github.com/offen/envconfig v1.5.0
|
||||||
github.com/otiai10/copy v1.14.0
|
github.com/otiai10/copy v1.14.1
|
||||||
github.com/pkg/sftp v1.13.6
|
github.com/pkg/sftp v1.13.7
|
||||||
github.com/robfig/cron/v3 v3.0.1
|
github.com/robfig/cron/v3 v3.0.1
|
||||||
github.com/studio-b12/gowebdav v0.9.0
|
github.com/studio-b12/gowebdav v0.10.0
|
||||||
golang.org/x/crypto v0.21.0
|
golang.org/x/crypto v0.32.0
|
||||||
golang.org/x/oauth2 v0.19.0
|
golang.org/x/oauth2 v0.25.0
|
||||||
golang.org/x/sync v0.7.0
|
golang.org/x/sync v0.10.0
|
||||||
mvdan.cc/sh/v3 v3.8.0
|
mvdan.cc/sh/v3 v3.10.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
||||||
github.com/cloudflare/circl v1.3.7 // indirect
|
github.com/cloudflare/circl v1.3.7 // indirect
|
||||||
github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
|
github.com/containerd/log v0.1.0 // indirect
|
||||||
github.com/golang/protobuf v1.5.3 // indirect
|
github.com/distribution/reference v0.6.0 // indirect
|
||||||
|
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||||
|
github.com/go-ini/ini v1.67.0 // indirect
|
||||||
|
github.com/go-logr/logr v1.4.1 // indirect
|
||||||
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
|
github.com/goccy/go-json v0.10.4 // indirect
|
||||||
|
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||||
|
github.com/golang/protobuf v1.5.4 // indirect
|
||||||
|
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||||
|
github.com/otiai10/mint v1.6.3 // indirect
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect
|
||||||
|
go.opentelemetry.io/otel v1.26.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/metric v1.26.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/sdk v1.26.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/trace v1.26.0 // indirect
|
||||||
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
|
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
|
||||||
google.golang.org/protobuf v1.33.0 // indirect
|
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
|
||||||
)
|
)
|
||||||
|
|
||||||
 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
 	github.com/ProtonMail/go-crypto v1.1.0-alpha.1
-	github.com/docker/distribution v2.8.2+incompatible // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
 	github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
 	github.com/dustin/go-humanize v1.0.1 // indirect
-	github.com/fatih/color v1.13.0 // indirect
+	github.com/fatih/color v1.17.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.9 // indirect
 	github.com/klauspost/pgzip v1.2.6
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.16 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/sha256-simd v1.0.1 // indirect
 	github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
-	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/rs/xid v1.5.0 // indirect
+	github.com/rs/xid v1.6.0 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
-	golang.org/x/net v0.22.0 // indirect
+	golang.org/x/net v0.34.0 // indirect
-	golang.org/x/sys v0.18.0 // indirect
+	golang.org/x/sys v0.29.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
-	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
-	gopkg.in/ini.v1 v1.67.0 // indirect
 	gotest.tools/v3 v3.0.3 // indirect
 )

@@ -21,7 +21,7 @@ func Wrap(err error, msg string) error {
 	chunks := strings.Split(frame.Function, "/")
 	withCaller := fmt.Sprintf("%s: %s", chunks[len(chunks)-1], msg)
 	if err == nil {
-		return fmt.Errorf(withCaller)
+		return errors.New(withCaller)
 	}
 	return fmt.Errorf("%s: %w", withCaller, err)
 }
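The change above is worth a note: passing a non-constant string as the format argument of fmt.Errorf makes any stray % in the message be interpreted as a formatting verb, and go vet's printf check flags such calls. errors.New stores the string verbatim. A minimal standalone sketch of the difference (the message value is made up for illustration):

package main

import (
	"errors"
	"fmt"
)

func main() {
	msg := "invalid value: 100%" // a message containing a literal percent sign

	// fmt.Errorf treats msg as a format string, so the trailing "%" is
	// misparsed and the output becomes "invalid value: 100%!(NOVERB)".
	fmt.Println(fmt.Errorf(msg))

	// errors.New keeps the message byte-for-byte, which is what Wrap
	// wants when there is no underlying error to chain.
	fmt.Println(errors.New(msg))
}
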
@@ -17,6 +17,8 @@ import (
 
 	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
 	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
@@ -24,8 +26,9 @@ import (
 
 type azureBlobStorage struct {
 	*storage.StorageBackend
 	client              *azblob.Client
+	uploadStreamOptions *blockblob.UploadStreamOptions
 	containerName       string
 }
 
 // Config contains values that define the configuration of an Azure Blob Storage.
@@ -36,6 +39,7 @@ type Config struct {
 	ConnectionString string
 	Endpoint         string
 	RemotePath       string
+	AccessTier       string
 }
 
 // NewStorageBackend creates and initializes a new Azure Blob Storage backend.
@@ -81,9 +85,26 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 		}
 	}
 
+	var uploadStreamOptions *blockblob.UploadStreamOptions
+	if opts.AccessTier != "" {
+		var found bool
+		for _, t := range blob.PossibleAccessTierValues() {
+			if string(t) == opts.AccessTier {
+				found = true
+				uploadStreamOptions = &blockblob.UploadStreamOptions{
+					AccessTier: &t,
+				}
+			}
+		}
+		if !found {
+			return nil, errwrap.Wrap(nil, fmt.Sprintf("%s is not a possible access tier value", opts.AccessTier))
+		}
+	}
+
 	storage := azureBlobStorage{
 		client:              client,
+		uploadStreamOptions: uploadStreamOptions,
 		containerName:       opts.ContainerName,
 		StorageBackend: &storage.StorageBackend{
 			DestinationPath: opts.RemotePath,
 			Log:             logFunc,
@@ -103,12 +124,13 @@ func (b *azureBlobStorage) Copy(file string) error {
 	if err != nil {
 		return errwrap.Wrap(err, fmt.Sprintf("error opening file %s", file))
 	}
+
 	_, err = b.client.UploadStream(
 		context.Background(),
 		b.containerName,
 		filepath.Join(b.DestinationPath, filepath.Base(file)),
 		fileReader,
-		nil,
+		b.uploadStreamOptions,
 	)
 	if err != nil {
 		return errwrap.Wrap(err, fmt.Sprintf("error uploading file %s", file))
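Taken together, these hunks validate the new AZURE_STORAGE_ACCESS_TIER value against blob.PossibleAccessTierValues() once at startup and then hand the result to every UploadStream call. A minimal standalone sketch of the same validation pattern; the helper name uploadOptionsForTier is illustrative, only the SDK identifiers come from the diff:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func uploadOptionsForTier(tier string) (*blockblob.UploadStreamOptions, error) {
	if tier == "" {
		return nil, nil // no tier configured: the storage account default applies
	}
	for _, t := range blob.PossibleAccessTierValues() {
		if string(t) == tier {
			// We return immediately, so taking the address of the loop
			// variable is safe here.
			return &blockblob.UploadStreamOptions{AccessTier: &t}, nil
		}
	}
	return nil, fmt.Errorf("%s is not a possible access tier value", tier)
}

func main() {
	opts, err := uploadOptionsForTier("Hot") // matches the test setup below
	fmt.Println(opts, err)
}
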
@@ -1,9 +1,12 @@
-FROM docker:26-dind
+FROM docker:27-dind
 
 RUN apk add \
+  age \
   coreutils \
   curl \
+  expect \
   gpg \
+  gpg-agent \
   jq \
   moreutils \
   tar \
test/age-passphrase/docker-compose.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    restart: always
+    environment:
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.age
+      BACKUP_RETENTION_PERIOD: ${BACKUP_RETENTION_PERIOD:-168h}
+      AGE_PASSPHRASE: "Dance.0Tonight.Go.Typical"
+    volumes:
+      - ${LOCAL_DIR:-./local}:/archive
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/age-passphrase/run.sh (new executable file, 39 lines)
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename "$(pwd)")
+
+export LOCAL_DIR="$(mktemp -d)"
+
+docker compose up -d --quiet-pull
+sleep 5
+
+docker compose exec backup backup
+
+expect_running_containers "2"
+
+TMP_DIR=$(mktemp -d)
+
+# complex usage of expect(1) due to age not having a way to programmatically
+# provide the passphrase
+expect -i <<EOL
+spawn age --decrypt -o "$LOCAL_DIR/decrypted.tar.gz" "$LOCAL_DIR/test.tar.gz.age"
+expect -exact "Enter passphrase: "
+send -- "Dance.0Tonight.Go.Typical\r"
+sleep 1
+EOL
+tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C "$TMP_DIR"
+
+if [ ! -f "$TMP_DIR/backup/app_data/offen.db" ]; then
+  fail "Could not find expected file in untared archive."
+fi
+rm -vf "$LOCAL_DIR/decrypted.tar.gz"
+
+pass "Found relevant files in decrypted and untared local backup."
+
+if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.age" ]; then
+  fail "Could not find local symlink to latest encrypted backup."
+fi
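The expect(1) dance exists only because the age CLI insists on reading the passphrase from an interactive terminal. A hedged sketch of doing the same decryption programmatically with the filippo.io/age library (an assumption for illustration; the test itself deliberately exercises the CLI):

package main

import (
	"io"
	"log"
	"os"

	"filippo.io/age"
)

func main() {
	in, err := os.Open("test.tar.gz.age")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	// A scrypt identity corresponds to age's passphrase mode.
	identity, err := age.NewScryptIdentity("Dance.0Tonight.Go.Typical")
	if err != nil {
		log.Fatal(err)
	}

	r, err := age.Decrypt(in, identity)
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create("decrypted.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, r); err != nil {
		log.Fatal(err)
	}
}
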
test/age-publickey/.gitignore (new file, 1 line)
@@ -0,0 +1 @@
+pk-*.txt
test/age-publickey/docker-compose.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    restart: always
+    environment:
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.age
+      BACKUP_RETENTION_PERIOD: ${BACKUP_RETENTION_PERIOD:-168h}
+      AGE_PUBLIC_KEYS: "${BACKUP_AGE_PUBLIC_KEYS}"
+    volumes:
+      - ${LOCAL_DIR:-./local}:/archive
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/age-publickey/run.sh (new executable file, 43 lines)
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename "$(pwd)")
+
+export LOCAL_DIR="$(mktemp -d)"
+
+age-keygen >"$LOCAL_DIR/pk-a.txt"
+PK_A="$(grep -E 'public key' <"$LOCAL_DIR/pk-a.txt" | cut -d: -f2 | xargs)"
+age-keygen >"$LOCAL_DIR/pk-b.txt"
+PK_B="$(grep -E 'public key' <"$LOCAL_DIR/pk-b.txt" | cut -d: -f2 | xargs)"
+
+export BACKUP_AGE_PUBLIC_KEYS="$PK_A,$PK_B"
+
+docker compose up -d --quiet-pull
+sleep 5
+
+docker compose exec backup backup
+
+expect_running_containers "2"
+
+do_decrypt() {
+  TMP_DIR=$(mktemp -d)
+  age --decrypt -i "$1" -o "$LOCAL_DIR/decrypted.tar.gz" "$LOCAL_DIR/test.tar.gz.age"
+  tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C "$TMP_DIR"
+
+  if [ ! -f "$TMP_DIR/backup/app_data/offen.db" ]; then
+    fail "Could not find expected file in untared archive."
+  fi
+  rm -vf "$LOCAL_DIR/decrypted.tar.gz"
+
+  pass "Found relevant files in decrypted and untared local backup."
+
+  if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.age" ]; then
+    fail "Could not find local symlink to latest encrypted backup."
+  fi
+}
+
+do_decrypt "$LOCAL_DIR/pk-a.txt"
+do_decrypt "$LOCAL_DIR/pk-b.txt"
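The property this test asserts is that an archive encrypted to a comma-separated list of AGE_PUBLIC_KEYS can be opened by any single matching identity. A hedged sketch of that multi-recipient behavior using the filippo.io/age library (assumed for illustration; the test uses the age CLI):

package main

import (
	"bytes"
	"io"
	"log"

	"filippo.io/age"
)

func main() {
	idA, err := age.GenerateX25519Identity()
	if err != nil {
		log.Fatal(err)
	}
	idB, err := age.GenerateX25519Identity()
	if err != nil {
		log.Fatal(err)
	}

	// Encrypt once, to both recipients, like the backup container does
	// with every key listed in AGE_PUBLIC_KEYS.
	var buf bytes.Buffer
	w, err := age.Encrypt(&buf, idA.Recipient(), idB.Recipient())
	if err != nil {
		log.Fatal(err)
	}
	io.WriteString(w, "backup payload")
	w.Close()

	// Either identity alone is sufficient to decrypt, which is what
	// do_decrypt verifies for pk-a.txt and pk-b.txt in turn.
	r, err := age.Decrypt(bytes.NewReader(buf.Bytes()), idB)
	if err != nil {
		log.Fatal(err)
	}
	plaintext, _ := io.ReadAll(r)
	log.Printf("%s", plaintext)
}
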
@@ -1,8 +1,6 @@
-version: '3'
-
 services:
   storage:
-    image: mcr.microsoft.com/azure-storage/azurite:3.26.0
+    image: mcr.microsoft.com/azure-storage/azurite:3.33.0
     volumes:
       - ${DATA_DIR:-./data}:/data
     command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data

@@ -36,9 +34,10 @@ services:
       AZURE_STORAGE_CONTAINER_NAME: test-container
       AZURE_STORAGE_ENDPOINT: http://storage:10000/{{ .AccountName }}/
       AZURE_STORAGE_PATH: 'path/to/backup'
+      AZURE_STORAGE_ACCESS_TIER: Hot
       BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      BACKUP_RETENTION_PERIOD: ${BACKUP_RETENTION_PERIOD:-168h}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
     volumes:
@@ -38,7 +38,7 @@ rm "$LOCAL_DIR/test.tar.gz"
 
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-BACKUP_RETENTION_DAYS="0" docker compose up -d
+BACKUP_RETENTION_PERIOD="1s" docker compose up -d
 sleep 5
 
 docker compose exec backup backup

@@ -52,7 +52,7 @@ pass "Remote backups have not been deleted."
 # The third part of this test checks if old backups get deleted when the retention
 # is set to 7 days (which it should)
 
-BACKUP_RETENTION_DAYS="7" docker compose up -d
+BACKUP_RETENTION_PERIOD="168h" docker compose up -d
 sleep 5
 
 info "Create first backup with no prune"
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   minio:
     hostname: minio.local

@@ -28,7 +26,7 @@ services:
       AWS_ENDPOINT_CA_CERT: /root/minio-rootCA.crt
       AWS_S3_BUCKET_NAME: backup
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      BACKUP_RETENTION_PERIOD: ${BACKUP_RETENTION_PERIOD:-168h}
       BACKUP_PRUNING_LEEWAY: 5s
     volumes:
       - app_data:/backup/app_data:ro
@@ -1,8 +1,6 @@
 # Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -1,5 +1,3 @@
-version: '3.8'
-
 services:
   database:
     image: mariadb:10.7
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   openapi_mock:
     image: muonsoft/openapi-mock:0.3.9

@@ -32,7 +30,7 @@ services:
       BACKUP_FILENAME_EXPAND: 'true'
       BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      BACKUP_RETENTION_PERIOD: ${BACKUP_RETENTION_PERIOD:-168h}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
       DROPBOX_ENDPOINT: http://openapi_mock:8080
@@ -29,7 +29,7 @@ fi
 
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-BACKUP_RETENTION_DAYS="0" docker compose up -d
+BACKUP_RETENTION_PERIOD="1s" docker compose up -d
 sleep 5
 
 logs=$(docker compose exec -T backup backup)

@@ -43,7 +43,7 @@ fi
 
 # The third part of this test checks if old backups get deleted when the retention
 # is set to 7 days (which it should)
-BACKUP_RETENTION_DAYS="7" docker compose up -d
+BACKUP_RETENTION_PERIOD="168h" docker compose up -d
 sleep 5
 
 info "Create second backup and prune"
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
test/gpg-asym/docker-compose.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    restart: always
+    environment:
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
+      BACKUP_RETENTION_PERIOD: ${BACKUP_RETENTION_PERIOD:-168h}
+      GPG_PUBLIC_KEY_RING_FILE: /keys/public_key.asc
+    volumes:
+      - ${KEY_DIR:-.}/public_key.asc:/keys/public_key.asc
+      - ${LOCAL_DIR:-./local}:/archive
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/gpg-asym/run.sh (new executable file, 49 lines)
@@ -0,0 +1,49 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+export LOCAL_DIR=$(mktemp -d)
+
+export KEY_DIR=$(mktemp -d)
+
+export PASSPHRASE="test"
+
+gpg --batch --gen-key <<EOF
+Key-Type: RSA
+Key-Length: 4096
+Name-Real: offen
+Name-Email: docker-volume-backup@local
+Expire-Date: 0
+Passphrase: $PASSPHRASE
+%commit
+EOF
+
+gpg --export --armor --batch --yes --pinentry-mode loopback --passphrase $PASSPHRASE --output $KEY_DIR/public_key.asc
+
+docker compose up -d --quiet-pull
+sleep 5
+
+docker compose exec backup backup
+
+expect_running_containers "2"
+
+TMP_DIR=$(mktemp -d)
+
+gpg -d --pinentry-mode loopback --yes --passphrase $PASSPHRASE "$LOCAL_DIR/test.tar.gz.gpg" > "$LOCAL_DIR/decrypted.tar.gz"
+
+tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C $TMP_DIR
+
+if [ ! -f $TMP_DIR/backup/app_data/offen.db ]; then
+  fail "Could not find expected file in untared archive."
+fi
+rm "$LOCAL_DIR/decrypted.tar.gz"
+
+pass "Found relevant files in decrypted and untared local backup."
+
+if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.gpg" ]; then
+  fail "Could not find local symlink to latest encrypted backup."
+fi
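The GPG_PUBLIC_KEY_RING_FILE option this test exercises implies encrypting the archive to an armored public keyring. A hedged sketch of that idea, assuming the classic openpgp API that the ProtonMail/go-crypto fork pinned in go.mod above preserves; file names and the unsigned/default-config choices are illustrative, not the project's actual code:

package main

import (
	"io"
	"log"
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	keyFile, err := os.Open("public_key.asc")
	if err != nil {
		log.Fatal(err)
	}
	defer keyFile.Close()

	// Read all recipient entities from the armored keyring.
	recipients, err := openpgp.ReadArmoredKeyRing(keyFile)
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create("test.tar.gz.gpg")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Encrypt to every entity in the ring; no signing key, default config.
	plaintext, err := openpgp.Encrypt(out, recipients, nil, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(plaintext, os.Stdin); err != nil {
		log.Fatal(err)
	}
	plaintext.Close()
}
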
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}

@@ -8,7 +6,7 @@ services:
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
       BACKUP_FILENAME: test.tar.gz
       BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
-      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      BACKUP_RETENTION_PERIOD: ${BACKUP_RETENTION_PERIOD:-168h}
       GPG_PASSPHRASE: 1234#$$ecret
     volumes:
       - ${LOCAL_DIR:-./local}:/archive
@@ -1,5 +1,3 @@
-version: '3.8'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}

@@ -10,7 +8,7 @@ services:
       BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
       BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      BACKUP_RETENTION_PERIOD: ${BACKUP_RETENTION_PERIOD:-168h}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
     volumes:
@@ -41,7 +41,7 @@ pass "Found symlink to latest version in local backup."
 
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-BACKUP_RETENTION_DAYS="0" docker compose up -d
+BACKUP_RETENTION_PERIOD="1s" docker compose up -d
 sleep 5
 
 docker compose exec backup backup

@@ -54,7 +54,7 @@ pass "Local backups have not been deleted."
 # The third part of this test checks if old backups get deleted when the retention
 # is set to 7 days (which it should)
 
-BACKUP_RETENTION_DAYS="7" docker compose up -d
+BACKUP_RETENTION_PERIOD="168h" docker compose up -d
 sleep 5
 
 info "Create first backup with no prune"
@@ -1,12 +1,10 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
     restart: always
     environment:
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: '7'
+      BACKUP_RETENTION_PERIOD: 168h
     volumes:
       - app_data:/backup/app_data:ro
       - /var/run/docker.sock:/var/run/docker.sock
@@ -13,7 +13,7 @@ sleep 5
 
 ec=0
 
-docker compose exec -e BACKUP_RETENTION_DAYS=7 -e BACKUP_FILENAME=test.tar.gz backup backup & \
+docker compose exec -e BACKUP_RETENTION_PERIOD=168h -e BACKUP_FILENAME=test.tar.gz backup backup & \
 { set +e; sleep 0.1; docker compose exec -e BACKUP_FILENAME=test2.tar.gz -e LOCK_TIMEOUT=1s backup backup; ec=$?;}
 
 if [ "$ec" = "0" ]; then
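This test expects the second invocation to exit non-zero: the first backup still holds the lock file, and LOCK_TIMEOUT=1s expires before it is released. A hedged sketch of that timeout pattern using gofrs/flock (bumped in go.mod above); the lock path and retry interval are made up for illustration:

package main

import (
	"context"
	"log"
	"time"

	"github.com/gofrs/flock"
)

func main() {
	lock := flock.New("/tmp/dvb-test.lock")

	// The 1s deadline plays the role of LOCK_TIMEOUT.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Retry every 100ms until the lock is acquired or the deadline passes.
	locked, err := lock.TryLockContext(ctx, 100*time.Millisecond)
	if err != nil {
		log.Fatalf("gave up waiting for lock: %v", err) // non-zero exit, like ec != 0 above
	}
	if locked {
		defer lock.Unlock()
		log.Println("acquired lock, running backup")
	}
}
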
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   minio:
     image: minio/minio:RELEASE.2020-08-04T23-10-51Z
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   db:
     image: postgres:14-alpine
@@ -1,8 +1,6 @@
 # Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -1,8 +1,6 @@
 # Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   minio:
     image: minio/minio:RELEASE.2020-08-04T23-10-51Z

@@ -27,7 +25,7 @@ services:
       BACKUP_FILENAME_EXPAND: 'true'
       BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: 7
+      BACKUP_RETENTION_PERIOD: 168h
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
       BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz
test/retention/docker-compose.yml (new file, 22 lines)
@@ -0,0 +1,22 @@
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    restart: always
+    environment:
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_RETENTION_PERIOD: 15s
+      BACKUP_PRUNING_LEEWAY: 1s
+    volumes:
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+      - ${LOCAL_DIR:-./local}:/archive
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/retention/run.sh (new executable file, 28 lines)
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+export LOCAL_DIR=$(mktemp -d)
+
+docker compose up -d --quiet-pull
+sleep 5
+
+docker compose exec backup backup
+
+sleep 20
+
+if [ $(ls -1 $LOCAL_DIR | wc -l) != "1" ]; then
+  fail "Unexpected number of backups after initial run"
+fi
+pass "Found 1 backup file."
+
+docker compose exec backup backup
+
+if [ $(ls -1 $LOCAL_DIR | wc -l) != "1" ]; then
+  fail "Unexpected number of backups after second run"
+fi
+pass "Found 1 backup file."
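This new test pins down the semantics of the migration that runs through this whole comparison: BACKUP_RETENTION_DAYS (an integer number of days) is replaced by BACKUP_RETENTION_PERIOD, a Go duration string, which is why the tests now use values like 1s, 15s, and 168h (the duration spelling of the old 7-day default). A hedged sketch of the cutoff arithmetic such a period implies; the helper name and the leeway handling are illustrative, not the project's exact code:

package main

import (
	"fmt"
	"log"
	"time"
)

// cutoff returns the point in time before which backups are prunable.
func cutoff(period string, leeway time.Duration) (time.Time, error) {
	d, err := time.ParseDuration(period)
	if err != nil {
		return time.Time{}, err
	}
	// Subtract the retention period and then the pruning leeway, so a
	// backup created "exactly" one period ago is not deleted because of
	// a few seconds of scheduling jitter.
	return time.Now().Add(-d).Add(-leeway), nil
}

func main() {
	c, err := cutoff("168h", 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pruning backups older than", c)
}
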
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   minio:
     image: minio/minio:RELEASE.2020-08-04T23-10-51Z

@@ -27,7 +25,7 @@ services:
       BACKUP_FILENAME_EXPAND: 'true'
       BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      BACKUP_RETENTION_PERIOD: ${BACKUP_RETENTION_PERIOD:-168h}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
     volumes:
@@ -22,9 +22,11 @@ docker run --rm \
 
 pass "Found relevant files in untared remote backups."
 
+sleep 5
+
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-BACKUP_RETENTION_DAYS="0" docker compose up -d
+BACKUP_RETENTION_PERIOD="5s" docker compose up -d
 sleep 5
 
 docker compose exec backup backup

@@ -39,7 +41,7 @@ pass "Remote backups have not been deleted."
 # The third part of this test checks if old backups get deleted when the retention
 # is set to 7 days (which it should)
 
-BACKUP_RETENTION_DAYS="7" docker compose up -d
+BACKUP_RETENTION_PERIOD="168h" docker compose up -d
 sleep 5
 
 info "Create first backup with no prune"
@@ -1,8 +1,6 @@
 # Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   minio:
     image: minio/minio:RELEASE.2020-08-04T23-10-51Z

@@ -33,7 +31,7 @@ services:
       AWS_S3_BUCKET_NAME: backup
       BACKUP_FILENAME: test.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: 7
+      BACKUP_RETENTION_PERIOD: 168h
       BACKUP_PRUNING_LEEWAY: 5s
     volumes:
       - pg_data:/backup/pg_data:ro
@@ -1,8 +1,6 @@
 # Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   minio:
     image: minio/minio:RELEASE.2020-08-04T23-10-51Z

@@ -27,7 +25,7 @@ services:
       AWS_S3_BUCKET_NAME: backup
       BACKUP_FILENAME: test.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: 7
+      BACKUP_RETENTION_PERIOD: 168h
       BACKUP_PRUNING_LEEWAY: 5s
     volumes:
       - pg_data:/backup/pg_data:ro
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   ssh:
     image: linuxserver/openssh-server:version-8.6_p1-r3

@@ -21,7 +19,7 @@ services:
       BACKUP_FILENAME_EXPAND: 'true'
       BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      BACKUP_RETENTION_PERIOD: ${BACKUP_RETENTION_PERIOD:-168h}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
       SSH_HOST_NAME: ssh
@@ -28,7 +28,7 @@ pass "Found relevant files in decrypted and untared remote backups."
 
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-BACKUP_RETENTION_DAYS="0" docker compose up -d
+BACKUP_RETENTION_PERIOD="1s" docker compose up -d
 sleep 5
 
 docker compose exec backup backup

@@ -43,7 +43,7 @@ pass "Remote backups have not been deleted."
 # The third part of this test checks if old backups get deleted when the retention
 # is set to 7 days (which it should)
 
-BACKUP_RETENTION_DAYS="7" docker compose up -d
+BACKUP_RETENTION_PERIOD="168h" docker compose up -d
 sleep 5
 
 info "Create first backup with no prune"
@@ -1,8 +1,6 @@
 # Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   minio:
     image: minio/minio:RELEASE.2020-08-04T23-10-51Z

@@ -33,7 +31,7 @@ services:
       AWS_S3_BUCKET_NAME: backup
       BACKUP_FILENAME: test.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: 7
+      BACKUP_RETENTION_PERIOD: 168h
       BACKUP_PRUNING_LEEWAY: 5s
     volumes:
       - pg_data:/backup/pg_data:ro
test/tar/docker-compose.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    restart: always
+    environment:
+      BACKUP_FILENAME: test.{{ .Extension }}
+      BACKUP_COMPRESSION: none
+    volumes:
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+      - ${LOCAL_DIR:-./local}:/archive
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/tar/run.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+export LOCAL_DIR=$(mktemp -d)
+
+docker compose up -d --quiet-pull
+sleep 5
+
+docker compose exec backup backup
+
+sleep 5
+
+expect_running_containers "2"
+
+tmp_dir=$(mktemp -d)
+tar -xvf "$LOCAL_DIR/test.tar" -C $tmp_dir
+if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
+  fail "Could not find expected file in untared archive."
+fi
+pass "Expected file was found."
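With BACKUP_COMPRESSION: none the archive is a bare .tar, so the compose file uses a {{ .Extension }} placeholder in BACKUP_FILENAME rather than hard-coding a suffix, and the test correspondingly looks for test.tar. A hedged sketch of how such a placeholder can resolve per compression setting using Go's text/template; the data shape is illustrative, not the project's exact type:

package main

import (
	"fmt"
	"log"
	"os"
	"text/template"
)

func main() {
	tmpl := template.Must(template.New("filename").Parse("test.{{ .Extension }}"))

	for _, compression := range []string{"none", "gz"} {
		ext := "tar"
		if compression != "none" {
			ext = "tar." + compression // e.g. tar.gz
		}
		// Prints "test.tar" for none and "test.tar.gz" for gz.
		if err := tmpl.Execute(os.Stdout, struct{ Extension string }{ext}); err != nil {
			log.Fatal(err)
		}
		fmt.Println()
	}
}
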
@@ -1,5 +1,3 @@
-version: '2.4'
-
 services:
   alpine:
     image: alpine:3.17.3
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   webdav:
     image: bytemark/webdav:2.4

@@ -20,7 +18,7 @@ services:
       BACKUP_FILENAME_EXPAND: 'true'
       BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      BACKUP_RETENTION_PERIOD: ${BACKUP_RETENTION_PERIOD:-168h}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
       WEBDAV_URL: http://webdav/
@@ -24,7 +24,7 @@ pass "Found relevant files in untared remote backup."
 
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
-BACKUP_RETENTION_DAYS="0" docker compose up -d
+BACKUP_RETENTION_PERIOD="1s" docker compose up -d
 sleep 5
 
 docker compose exec backup backup

@@ -39,7 +39,7 @@ pass "Remote backups have not been deleted."
 # The third part of this test checks if old backups get deleted when the retention
 # is set to 7 days (which it should)
 
-BACKUP_RETENTION_DAYS="7" docker compose up -d
+BACKUP_RETENTION_PERIOD="168h" docker compose up -d
 sleep 5
 
 info "Create first backup with no prune"