Mirror of https://github.com/offen/docker-volume-backup.git (synced 2025-12-05 17:18:02 +01:00)

Compare commits: v2.0.0-alp...v2.0.0-alp (14 commits)
| SHA1 |
|---|
| f15379795f |
| 053422ba0e |
| 39b933b8e8 |
| 8161ad7f8f |
| 34e01fa303 |
| 2554c538ea |
| 77f948d4da |
| d388785222 |
| b46c402b19 |
| 29f8a078bc |
| 3f7c08d616 |
| 8b7d27740a |
| 9ddddc139a |
| d34b525804 |
```diff
@@ -11,10 +11,6 @@ jobs:
           name: Build
           command: |
             docker build . -t offen/docker-volume-backup:canary
-      - run:
-          name: Install gnupg
-          command: |
-            sudo apt-get install -y gnupg
       - run:
           name: Run tests
           working_directory: ~/docker-volume-backup/test
```
```diff
@@ -2,7 +2,7 @@
 
 Backup Docker volumes locally or to any S3 compatible storage.
 
-The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to a local directory or any S3 compatible storage (or both), and rotates away old backups if configured.
+The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to a local directory or any S3 compatible storage (or both) and rotates away old backups if configured.
 
 ## Configuration
```
```diff
@@ -79,7 +79,7 @@ AWS_S3_BUCKET_NAME="<xxx>"
 # that is expected to be bigger than the maximum difference of backups.
 # Valid values have a suffix of (s)econds, (m)inutes or (h)ours.
 
-# BACKUP_PRUNING_LEEWAY="1m"
+# BACKUP_PRUNING_LEEWAY="10m"
 
 # In case your target bucket or directory contains other files than the ones
 # managed by this container, you can limit the scope of rotation by setting
```
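The leeway value ends up in Go's `time.ParseDuration` (visible in the `pruneOldBackups` hunk further down), which is where the suffix rule quoted above comes from. A minimal sketch of how such values parse; the sample values are illustrative:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// time.ParseDuration accepts the suffixes the README documents:
	// "30s", "10m" and "1h" are valid, a bare "10" is not.
	for _, v := range []string{"30s", "10m", "1h", "10"} {
		d, err := time.ParseDuration(v)
		if err != nil {
			fmt.Printf("%q is rejected: %v\n", v, err)
			continue
		}
		fmt.Printf("%q parses to %s\n", v, d)
	}
}
```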
```diff
@@ -177,8 +177,8 @@ docker exec <container_ref> backup
 This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
 
-- The original image is based on `ubuntu` and additional tools, making it very heavy. This version is roughly 1/25 in compressed size (it's ~12MB).
-- The original image uses a shell script, when this is written in Go, which makes it easier to extend and maintain (more verbose also).
+- The original image uses a shell script, when this is written in Go.
 - The original image proposed to handle backup rotation through AWS S3 lifecycle policies. This image adds the option to rotate away old backups through the same command so this functionality can also be offered for non-AWS storage backends like MinIO. Local copies of backups can also be pruned once they reach a certain age.
-- InfluxDB specific functionality from the original image was removed.
+- InfluxDB specific functionality was removed.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.
```
```diff
@@ -4,7 +4,6 @@
 package main
 
 import (
-	"bufio"
 	"bytes"
 	"context"
 	"errors"
@@ -12,9 +11,11 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"os/exec"
 	"path"
 	"path/filepath"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/docker/docker/api/types"
@@ -22,7 +23,6 @@ import (
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/client"
 	"github.com/joho/godotenv"
-	"github.com/leekchan/timeutil"
 	minio "github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
 	"github.com/sirupsen/logrus"
```
```diff
@@ -44,31 +44,39 @@ func main() {
 	s.logger.Info("Finished running backup tasks.")
 }
 
 // script holds all the stateful information required to orchestrate a
 // single backup run.
 type script struct {
-	ctx    context.Context
-	cli    *client.Client
-	mc     *minio.Client
-	logger *logrus.Logger
-
-	start time.Time
-
-	file           string
-	bucket         string
-	archive        string
-	sources        string
-	passphrase     []byte
-	retentionDays  *int
-	leeway         *time.Duration
-	containerLabel string
-	pruningPrefix  string
+	ctx        context.Context
+	cli        *client.Client
+	mc         *minio.Client
+	logger     *logrus.Logger
+	file       string
+	bucket     string
+	archive    string
+	sources    string
+	passphrase string
+}
+
+// lock opens a lockfile at the given location, keeping it locked until the
+// caller invokes the returned release func. When invoked while the file is
+// still locked the function panics.
+func lock(lockfile string) func() error {
+	lf, err := os.OpenFile(lockfile, os.O_CREATE, os.ModeAppend)
+	if err != nil {
+		panic(err)
+	}
+	return func() error {
+		if err := lf.Close(); err != nil {
+			return fmt.Errorf("lock: error releasing file lock: %w", err)
+		}
+		if err := os.Remove(lockfile); err != nil {
+			return fmt.Errorf("lock: error removing lock file: %w", err)
+		}
+		return nil
+	}
 }
 
 // init creates all resources needed for the script to perform actions against
-// remote resources like the Docker engine or remote storage locations. All
-// reading from env vars or other configuration sources is expected to happen
-// in this method.
+// remote resources like the Docker engine or remote storage locations.
 func (s *script) init() error {
 	s.ctx = context.Background()
 	s.logger = logrus.New()
```
```diff
@@ -110,27 +118,7 @@ func (s *script) init() error {
 	s.file = path.Join("/tmp", file)
 	s.archive = os.Getenv("BACKUP_ARCHIVE")
 	s.sources = os.Getenv("BACKUP_SOURCES")
-	if v := os.Getenv("GPG_PASSPHRASE"); v != "" {
-		s.passphrase = []byte(v)
-	}
-	if v := os.Getenv("BACKUP_RETENTION_DAYS"); v != "" {
-		i, err := strconv.Atoi(v)
-		if err != nil {
-			return fmt.Errorf("init: error parsing BACKUP_RETENTION_DAYS as int: %w", err)
-		}
-		s.retentionDays = &i
-	}
-	if v := os.Getenv("BACKUP_PRUNING_LEEWAY"); v != "" {
-		d, err := time.ParseDuration(v)
-		if err != nil {
-			return fmt.Errorf("init: error parsing BACKUP_PRUNING_LEEWAY as duration: %w", err)
-		}
-		s.leeway = &d
-	}
-	s.containerLabel = os.Getenv("BACKUP_STOP_CONTAINER_LABEL")
-	s.pruningPrefix = os.Getenv("BACKUP_PRUNING_PREFIX")
-	s.start = time.Now()
-
+	s.passphrase = os.Getenv("GPG_PASSPHRASE")
 	return nil
 }
```
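The left-hand side of this hunk reads every variable once in `init` and stores typed values on the `script` struct, using pointer fields so an unset variable (nil) stays distinguishable from a zero value; the right-hand side defers raw `os.Getenv` calls to each point of use. A condensed, self-contained sketch of the pointer-field pattern, with illustrative names:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// config mirrors the pointer-field idea from the diff: a nil field
// means the corresponding variable was not set at all.
type config struct {
	retentionDays *int
	leeway        *time.Duration
}

func fromEnv() (*config, error) {
	c := &config{}
	if v := os.Getenv("BACKUP_RETENTION_DAYS"); v != "" {
		i, err := strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("parsing BACKUP_RETENTION_DAYS as int: %w", err)
		}
		c.retentionDays = &i
	}
	if v := os.Getenv("BACKUP_PRUNING_LEEWAY"); v != "" {
		d, err := time.ParseDuration(v)
		if err != nil {
			return nil, fmt.Errorf("parsing BACKUP_PRUNING_LEEWAY as duration: %w", err)
		}
		c.leeway = &d
	}
	return c, nil
}

func main() {
	c, err := fromEnv()
	if err != nil {
		panic(err)
	}
	if c.retentionDays == nil {
		fmt.Println("retention not configured, skipping pruning")
	}
}
```

Validating everything up front also means a malformed value fails the whole run immediately instead of surfacing halfway through a backup.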
```diff
@@ -139,9 +127,8 @@ func (s *script) init() error {
 // sure containers are being restarted if required.
 func (s *script) stopContainersAndRun(thunk func() error) error {
 	if s.cli == nil {
-		return thunk()
+		return nil
 	}
 
 	allContainers, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
 		Quiet: true,
 	})
```
```diff
@@ -151,7 +138,7 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 
 	containerLabel := fmt.Sprintf(
 		"docker-volume-backup.stop-during-backup=%s",
-		s.containerLabel,
+		os.Getenv("BACKUP_STOP_CONTAINER_LABEL"),
 	)
 	containersToStop, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
 		Quiet: true,
```
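The label built here narrows the container list to those that explicitly opted in via `docker-volume-backup.stop-during-backup`. A minimal sketch of how such label filtering looks with the Docker SDK; the `Filters` wiring is an assumption based on the SDK's usual usage, not a line from this diff:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	// Select only containers that carry the opt-in label.
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
		Quiet: true,
		Filters: filters.NewArgs(filters.Arg(
			"label", "docker-volume-backup.stop-during-backup=true",
		)),
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d container(s) would be stopped during backup\n", len(containers))
}
```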
```diff
@@ -164,25 +151,22 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 	if err != nil {
 		return fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
 	}
 
-	if len(containersToStop) == 0 {
-		return thunk()
-	}
-
 	s.logger.Infof(
-		"Stopping %d container(s) labeled `%s` out of %d running container(s).",
+		"Stopping %d containers labeled `%s` out of %d running containers.",
 		len(containersToStop),
 		containerLabel,
 		len(allContainers),
 	)
 
 	var stoppedContainers []types.Container
-	var stopErrors []error
-	for _, container := range containersToStop {
-		if err := s.cli.ContainerStop(s.ctx, container.ID, nil); err != nil {
-			stopErrors = append(stopErrors, err)
-		} else {
-			stoppedContainers = append(stoppedContainers, container)
+	var errors []error
+	if len(containersToStop) != 0 {
+		for _, container := range containersToStop {
+			if err := s.cli.ContainerStop(s.ctx, container.ID, nil); err != nil {
+				errors = append(errors, err)
+			} else {
+				stoppedContainers = append(stoppedContainers, container)
+			}
 		}
 	}
```
```diff
@@ -231,17 +215,21 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 				err,
 			)
 		}
-		s.logger.Infof("Restarted %d container(s) and the matching service(s).", len(stoppedContainers))
+		s.logger.Infof("Successfully restarted %d containers.", len(stoppedContainers))
 		return nil
 	}()
 
-	if len(stopErrors) != 0 {
-		return fmt.Errorf(
-			"stopContainersAndRun: %d error(s) stopping containers: %w",
-			len(stopErrors),
+	var stopErr error
+	if len(errors) != 0 {
+		stopErr = fmt.Errorf(
+			"stopContainersAndRun: %d errors stopping containers: %w",
+			len(errors),
 			err,
 		)
 	}
+	if stopErr != nil {
+		return stopErr
+	}
 
 	return thunk()
 }
```
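Taken together, the `stopContainersAndRun` hunks implement one shape: stop the labeled containers while collecting any stop errors, always attempt to restart whatever was stopped, and only run the backup thunk when the stop phase succeeded. A toy sketch of that control flow, with hypothetical function names:

```go
package main

import (
	"errors"
	"fmt"
)

// stopRunRestart mirrors the diff's control flow in miniature: restart
// is attempted no matter how the stop phase went, and a stop failure
// is reported instead of running the task. Names are illustrative.
func stopRunRestart(stop, restart, task func() error) error {
	stopErr := stop()
	defer func() {
		if err := restart(); err != nil {
			fmt.Println("warning: restart failed:", err)
		}
	}()
	if stopErr != nil {
		return fmt.Errorf("stopping containers: %w", stopErr)
	}
	return task()
}

func main() {
	err := stopRunRestart(
		func() error { return errors.New("one container refused to stop") },
		func() error { return nil },
		func() error { fmt.Println("running backup"); return nil },
	)
	fmt.Println("result:", err)
}
```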
```diff
@@ -249,11 +237,15 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 // takeBackup creates a tar archive of the configured backup location and
 // saves it to disk.
 func (s *script) takeBackup() error {
-	s.file = timeutil.Strftime(&s.start, s.file)
+	outBytes, err := exec.Command("date", fmt.Sprintf("+%s", s.file)).Output()
+	if err != nil {
+		return fmt.Errorf("takeBackup: error formatting filename template: %w", err)
+	}
+	s.file = strings.TrimSpace(string(outBytes))
 	if err := targz.Compress(s.sources, s.file); err != nil {
 		return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
 	}
-	s.logger.Infof("Created backup of `%s` at `%s`.", s.sources, s.file)
+	s.logger.Infof("Successfully created backup of `%s` at `%s`.", s.sources, s.file)
 	return nil
 }
```
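Both sides expand the strftime-style `BACKUP_FILENAME` template (for example `backup-%Y-%m-%dT%H-%M-%S.tar.gz`): the right-hand side shells out to `date +FORMAT` and trims the trailing newline, while the left-hand side does the expansion in-process via `github.com/leekchan/timeutil`, avoiding the subprocess. A minimal sketch of the in-process variant:

```go
package main

import (
	"fmt"
	"time"

	"github.com/leekchan/timeutil"
)

func main() {
	now := time.Now()
	// Expand an strftime-style template the same way the diff's
	// timeutil.Strftime call does.
	name := timeutil.Strftime(&now, "backup-%Y-%m-%dT%H-%M-%S.tar.gz")
	fmt.Println(name) // e.g. backup-2021-08-24T05-00-00.tar.gz
}
```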
```diff
@@ -261,14 +253,13 @@ func (s *script) takeBackup() error {
 // In case no passphrase is given it returns early, leaving the backup file
 // untouched.
 func (s *script) encryptBackup() error {
-	if s.passphrase == nil {
+	if s.passphrase == "" {
 		return nil
 	}
 
-	output := bytes.NewBuffer(nil)
+	buf := bytes.NewBuffer(nil)
 	_, name := path.Split(s.file)
 
-	pt, err := openpgp.SymmetricallyEncrypt(output, []byte(s.passphrase), &openpgp.FileHints{
+	pt, err := openpgp.SymmetricallyEncrypt(buf, []byte(s.passphrase), &openpgp.FileHints{
 		IsBinary: true,
 		FileName: name,
 	}, nil)
```
```diff
@@ -276,25 +267,28 @@ func (s *script) encryptBackup() error {
 		return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
 	}
 
-	file, err := os.Open(s.file)
+	unencrypted, err := ioutil.ReadFile(s.file)
 	if err != nil {
-		return fmt.Errorf("encryptBackup: error opening backup file %s: %w", s.file, err)
+		pt.Close()
+		return fmt.Errorf("encryptBackup: error reading unencrypted backup file: %w", err)
 	}
-	fileReader := bufio.NewReader(file)
-	fileReader.WriteTo(pt)
+	_, err = pt.Write(unencrypted)
+	if err != nil {
+		pt.Close()
+		return fmt.Errorf("encryptBackup: error writing backup contents: %w", err)
+	}
 	pt.Close()
 
 	gpgFile := fmt.Sprintf("%s.gpg", s.file)
-	if err := ioutil.WriteFile(gpgFile, output.Bytes(), os.ModeAppend); err != nil {
+	if err := ioutil.WriteFile(gpgFile, buf.Bytes(), os.ModeAppend); err != nil {
 		return fmt.Errorf("encryptBackup: error writing encrypted version of backup: %w", err)
 	}
 
 	if err := os.Remove(s.file); err != nil {
 		return fmt.Errorf("encryptBackup: error removing unencrpyted backup: %w", err)
 	}
 
 	s.file = gpgFile
-	s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
+	s.logger.Infof("Successfully encrypted backup using given passphrase, saving as `%s`.", s.file)
 	return nil
 }
```
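Both variants accumulate the entire ciphertext in a `bytes.Buffer` before writing it out, and the right-hand side additionally reads the whole plaintext with `ioutil.ReadFile`. The same `openpgp.SymmetricallyEncrypt` API can stream end to end if it is pointed at the output file directly; a sketch under that assumption (not the project's code):

```go
package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/openpgp"
)

// encryptFile streams src into a symmetrically encrypted dst without
// buffering either side in memory.
func encryptFile(src, dst string, passphrase []byte) error {
	in, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("opening plaintext: %w", err)
	}
	defer in.Close()

	out, err := os.Create(dst)
	if err != nil {
		return fmt.Errorf("creating ciphertext file: %w", err)
	}
	defer out.Close()

	pt, err := openpgp.SymmetricallyEncrypt(out, passphrase, &openpgp.FileHints{
		IsBinary: true,
	}, nil)
	if err != nil {
		return fmt.Errorf("starting encryption: %w", err)
	}
	if _, err := io.Copy(pt, in); err != nil {
		pt.Close()
		return fmt.Errorf("encrypting contents: %w", err)
	}
	// Closing the plaintext writer flushes the final OpenPGP packets.
	return pt.Close()
}

func main() {
	if err := encryptFile("backup.tar.gz", "backup.tar.gz.gpg", []byte("secret")); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```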
```diff
@@ -309,14 +303,14 @@ func (s *script) copyBackup() error {
 		if err != nil {
 			return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
 		}
-		s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`", s.file, s.bucket)
+		s.logger.Infof("Successfully uploaded a copy of backup `%s` to bucket `%s`", s.file, s.bucket)
 	}
 
 	if _, err := os.Stat(s.archive); !os.IsNotExist(err) {
 		if err := copy(s.file, path.Join(s.archive, name)); err != nil {
 			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
 		}
-		s.logger.Infof("Stored copy of backup `%s` in local archive `%s`", s.file, s.archive)
+		s.logger.Infof("Successfully stored copy of backup `%s` in local archive `%s`", s.file, s.archive)
 	}
 	return nil
 }
```
```diff
@@ -326,7 +320,7 @@ func (s *script) cleanBackup() error {
 	if err := os.Remove(s.file); err != nil {
 		return fmt.Errorf("cleanBackup: error removing file: %w", err)
 	}
-	s.logger.Info("Cleaned up local artifacts.")
+	s.logger.Info("Successfully cleaned up local artifacts.")
 	return nil
 }
```
```diff
@@ -334,22 +328,29 @@ func (s *script) cleanBackup() error {
 // the given configuration. In case the given configuration would delete all
 // backups, it does nothing instead.
 func (s *script) pruneOldBackups() error {
-	if s.retentionDays == nil {
+	retention := os.Getenv("BACKUP_RETENTION_DAYS")
+	if retention == "" {
 		return nil
 	}
-
-	if s.leeway != nil {
-		s.logger.Infof("Sleeping for %s before pruning backups.", s.leeway)
-		time.Sleep(*s.leeway)
+	retentionDays, err := strconv.Atoi(retention)
+	if err != nil {
+		return fmt.Errorf("pruneOldBackups: error parsing BACKUP_RETENTION_DAYS as int: %w", err)
 	}
+	leeway := os.Getenv("BACKUP_PRUNING_LEEWAY")
+	sleepFor, err := time.ParseDuration(leeway)
+	if err != nil {
+		return fmt.Errorf("pruneBackups: error parsing given leeway value: %w", err)
+	}
+	s.logger.Infof("Sleeping for %s before pruning backups.", leeway)
+	time.Sleep(sleepFor)
 
-	s.logger.Infof("Trying to prune backups older than %d day(s) now.", *s.retentionDays)
-	deadline := s.start.AddDate(0, 0, -*s.retentionDays)
+	s.logger.Infof("Trying to prune backups older than %d days now.", retentionDays)
+	deadline := time.Now().AddDate(0, 0, -retentionDays)
 
 	if s.bucket != "" {
 		candidates := s.mc.ListObjects(s.ctx, s.bucket, minio.ListObjectsOptions{
 			WithMetadata: true,
-			Prefix:       s.pruningPrefix,
+			Prefix:       os.Getenv("BACKUP_PRUNING_PREFIX"),
 		})
 
 		var matches []minio.ObjectInfo
```
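The pruning deadline is plain date arithmetic: anything whose modification time is older than now minus the retention window becomes a candidate. A small self-contained illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	retentionDays := 7
	// A backup is prunable when its modification time lies before
	// now minus the retention window, as in the diff's deadline check.
	deadline := time.Now().AddDate(0, 0, -retentionDays)

	modTimes := []time.Time{
		time.Now().AddDate(0, 0, -1),  // kept
		time.Now().AddDate(0, 0, -10), // pruned
	}
	for _, mt := range modTimes {
		fmt.Printf("modified %s, prune: %v\n", mt.Format(time.RFC3339), mt.Before(deadline))
	}
}
```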
```diff
@@ -382,13 +383,13 @@ func (s *script) pruneOldBackups() error {
 
 		if len(errors) != 0 {
 			return fmt.Errorf(
-				"pruneOldBackups: %d error(s) removing files from remote storage: %w",
+				"pruneOldBackups: %d errors removing files from remote storage: %w",
 				len(errors),
 				errors[0],
 			)
 		}
 		s.logger.Infof(
-			"Pruned %d out of %d remote backup(s) as their age exceeded the configured retention period.",
+			"Successfully pruned %d out of %d remote backups as their age exceeded the configured retention period.",
 			len(matches),
 			lenCandidates,
 		)
```
```diff
@@ -398,13 +399,13 @@ func (s *script) pruneOldBackups() error {
 				len(matches),
 			)
 		} else {
-			s.logger.Infof("None of %d remote backup(s) were pruned.", lenCandidates)
+			s.logger.Infof("None of %d remote backups were pruned.", lenCandidates)
 		}
 	}
 
 	if _, err := os.Stat(s.archive); !os.IsNotExist(err) {
 		candidates, err := filepath.Glob(
-			path.Join(s.archive, fmt.Sprintf("%s*", s.pruningPrefix)),
+			path.Join(s.archive, fmt.Sprintf("%s*", os.Getenv("BACKUP_PRUNING_PREFIX"))),
 		)
 		if err != nil {
 			return fmt.Errorf(
```
```diff
@@ -412,7 +413,7 @@ func (s *script) pruneOldBackups() error {
 			)
 		}
 
-		var matches []string
+		var matches []os.FileInfo
 		for _, candidate := range candidates {
 			fi, err := os.Stat(candidate)
 			if err != nil {
```
```diff
@@ -424,26 +425,26 @@ func (s *script) pruneOldBackups() error {
 			}
 
 			if fi.ModTime().Before(deadline) {
-				matches = append(matches, candidate)
+				matches = append(matches, fi)
 			}
 		}
 
 		if len(matches) != 0 && len(matches) != len(candidates) {
 			var errors []error
 			for _, candidate := range matches {
-				if err := os.Remove(candidate); err != nil {
+				if err := os.Remove(candidate.Name()); err != nil {
 					errors = append(errors, err)
 				}
 			}
 			if len(errors) != 0 {
 				return fmt.Errorf(
-					"pruneOldBackups: %d error(s) deleting local files, starting with: %w",
+					"pruneOldBackups: %d errors deleting local files, starting with: %w",
 					len(errors),
 					errors[0],
 				)
 			}
 			s.logger.Infof(
-				"Pruned %d out of %d local backup(s) as their age exceeded the configured retention period.",
+				"Successfully pruned %d out of %d local backups as their age exceeded the configured retention period.",
 				len(matches),
 				len(candidates),
 			)
```
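One subtlety in this hunk: `os.FileInfo.Name()` returns only the base name, so the right-hand side's `os.Remove(candidate.Name())` resolves relative to the process working directory, while the left-hand side removes the full path returned by the glob. Re-joining the base name with the archive directory would make the intent explicit; a sketch, with illustrative paths:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	archive := "/archive"
	fi, err := os.Stat(filepath.Join(archive, "backup-2021.tar.gz"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// fi.Name() is just "backup-2021.tar.gz"; removing it by that name
	// alone depends on the current working directory. Joining it back
	// onto the archive directory removes that dependency.
	target := filepath.Join(archive, fi.Name())
	if err := os.Remove(target); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```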
```diff
@@ -453,7 +454,7 @@ func (s *script) pruneOldBackups() error {
 				len(matches),
 			)
 		} else {
-			s.logger.Infof("None of %d local backup(s) were pruned.", len(candidates))
+			s.logger.Infof("None of %d local backups were pruned.", len(candidates))
 		}
 	}
 	return nil
```
```diff
@@ -469,26 +470,6 @@ func (s *script) must(err error) {
 	}
 }
 
-// lock opens a lockfile at the given location, keeping it locked until the
-// caller invokes the returned release func. When invoked while the file is
-// still locked the function panics.
-func lock(lockfile string) func() error {
-	lf, err := os.OpenFile(lockfile, os.O_CREATE|os.O_RDWR, os.ModeAppend)
-	if err != nil {
-		panic(err)
-	}
-	return func() error {
-		if err := lf.Close(); err != nil {
-			return fmt.Errorf("lock: error releasing file lock: %w", err)
-		}
-		if err := os.Remove(lockfile); err != nil {
-			return fmt.Errorf("lock: error removing lock file: %w", err)
-		}
-		return nil
-	}
-}
-
 // copy creates a copy of the file located at `dst` at `src`.
 func copy(src, dst string) error {
 	in, err := os.Open(src)
 	if err != nil {
```
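A note on both `lock` variants (the one removed here opens with `os.O_CREATE|os.O_RDWR`, the one added near the top of the file with plain `os.O_CREATE`): `O_CREATE` succeeds whether or not the file already exists, so the open call alone does not provide mutual exclusion between two concurrent runs. A create that fails when the lockfile is already present needs `O_EXCL`; a sketch of that stricter variant, not the project's code:

```go
package main

import (
	"fmt"
	"os"
)

// tryLock creates lockfile exclusively: if another run already holds
// it, os.O_EXCL makes OpenFile fail instead of silently succeeding.
func tryLock(lockfile string) (release func() error, err error) {
	lf, err := os.OpenFile(lockfile, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644)
	if err != nil {
		return nil, fmt.Errorf("lock: already locked or not creatable: %w", err)
	}
	return func() error {
		if err := lf.Close(); err != nil {
			return fmt.Errorf("lock: error releasing file lock: %w", err)
		}
		return os.Remove(lockfile)
	}, nil
}

func main() {
	release, err := tryLock("/tmp/backup.lock")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer release()
	fmt.Println("lock acquired")
}
```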
```diff
@@ -9,6 +9,7 @@
 set -e
 
 # Write cronjob env to file, fill in sensible defaults, and read them back in
+mkdir -p /etc/backup
 cat <<EOF > /etc/backup.env
 BACKUP_SOURCES="${BACKUP_SOURCES:-/backup}"
 BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
@@ -16,7 +17,7 @@ BACKUP_FILENAME="${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}"
 BACKUP_ARCHIVE="${BACKUP_ARCHIVE:-/archive}"
 
 BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
-BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-1m}"
+BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
 BACKUP_PRUNING_PREFIX="${BACKUP_PRUNING_PREFIX:-}"
 BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
```
go.mod

```diff
@@ -5,9 +5,7 @@ go 1.17
 require (
 	github.com/docker/docker v20.10.8+incompatible
 	github.com/joho/godotenv v1.3.0
-	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
 	github.com/minio/minio-go/v7 v7.0.12
-	github.com/sirupsen/logrus v1.8.1
 	github.com/walle/targz v0.0.0-20140417120357-57fe4206da5a
 	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
 )
@@ -34,6 +32,7 @@ require (
 	github.com/opencontainers/image-spec v1.0.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/rs/xid v1.2.1 // indirect
+	github.com/sirupsen/logrus v1.8.1 // indirect
 	golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect
 	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
 	golang.org/x/text v0.3.4 // indirect
```
go.sum

```diff
@@ -401,8 +401,6 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d h1:2puqoOQwi3Ai1oznMOsFIbifm6kIfJaLLyYzWD4IzTs=
-github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d/go.mod h1:hO90vCP2x3exaSH58BIAowSKvV+0OsY21TtzuFGHON4=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
```
```diff
@@ -28,7 +28,6 @@ services:
       BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
-      GPG_PASSPHRASE: 1234secret
     volumes:
       - ./local:/archive
       - app_data:/backup/app_data:ro
```
```diff
@@ -13,13 +13,11 @@ docker-compose exec backup backup
 
 docker run --rm -it \
   -v compose_backup_data:/data alpine \
-  ash -c 'apk add gnupg && echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /data/backup/test.tar.gz.gpg > /tmp/test.tar.gz && tar -xf /tmp/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
+  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
 
 echo "[TEST:PASS] Found relevant files in untared remote backup."
 
-echo 1234secret | gpg -d --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
-tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
-rm ./local/decrypted.tar.gz
+tar -xf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
 
 echo "[TEST:PASS] Found relevant files in untared local backup."
 
@@ -31,6 +29,8 @@ fi
 
 echo "[TEST:PASS] All containers running post backup."
 
+docker-compose down
+
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
 # TODO: find out if we can test actual deletion without having to wait for a day
```