Compare commits

3 Commits

Author          SHA1         Message                              Date
Frederik Ring   038116c3a3   Call through to cp -p for copying    2021-12-10 13:00:41 +01:00
Frederik Ring   7a5068446a   Add test case for ownership          2021-12-10 10:50:15 +01:00
Frederik Ring   1b744d4c1c   Allow changing backup ownership      2021-12-10 10:12:53 +01:00

78 changed files with 1567 additions and 4594 deletions


@@ -3,9 +3,8 @@ version: 2.1
jobs:
canary:
machine:
- image: ubuntu-2004:202201-02
+ image: ubuntu-1604:202007-01
working_directory: ~/docker-volume-backup
- resource_class: large
steps:
- checkout
- run:
@@ -20,7 +19,6 @@ jobs:
name: Run tests
working_directory: ~/docker-volume-backup/test
command: |
- export GPG_TTY=$(tty)
./test.sh canary
build:
@@ -30,13 +28,11 @@ jobs:
DOCKER_BUILDKIT: '1'
DOCKER_CLI_EXPERIMENTAL: enabled
working_directory: ~/docker-volume-backup
- resource_class: large
steps:
- checkout
- setup_remote_docker:
version: 20.10.6
- - docker/install-docker-credential-helper:
-     release-tag: v0.6.4
+ - docker/install-docker-credential-helper
- docker/configure-docker-credentials-store
- run:
name: Push to Docker Hub
@@ -51,7 +47,6 @@ jobs:
if [[ "$CIRCLE_TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
# prerelease tags like `v2.0.0-alpha.1` should not be released as `latest`
tag_args="$tag_args -t offen/docker-volume-backup:latest"
tag_args="$tag_args -t offen/docker-volume-backup:$(echo "$CIRCLE_TAG" | cut -d. -f1)"
fi
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
$tag_args . --push
@@ -72,4 +67,4 @@ workflows:
only: /^v.*/
orbs:
- docker: circleci/docker@2.1.4
+ docker: circleci/docker@1.0.1
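
The release step above gates the `latest` and major-version tags on a strict semver match. For illustration only (not part of this changeset), a minimal Go sketch of that same tag-derivation logic, with the image name assumed from the commands above:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var stableTag = regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)

func tagsFor(circleTag string) []string {
	tags := []string{"offen/docker-volume-backup:" + circleTag}
	// Prerelease tags like `v2.0.0-alpha.1` must not become `latest`.
	if stableTag.MatchString(circleTag) {
		tags = append(tags, "offen/docker-volume-backup:latest")
		// `cut -d. -f1` on "v2.0.0" yields the major alias "v2".
		tags = append(tags, "offen/docker-volume-backup:"+strings.SplitN(circleTag, ".", 2)[0])
	}
	return tags
}

func main() {
	fmt.Println(tagsFor("v2.0.0"))         // includes latest and the v2 alias
	fmt.Println(tagsFor("v2.0.0-alpha.1")) // version tag only
}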


@@ -1,7 +1 @@
test
- .github
- .circleci
- docs
- .editorconfig
- LICENSE
- README.md

.github/FUNDING.yml

@@ -1,3 +0,0 @@
github: offen
patreon: offen


@@ -1,34 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
<!--
A clear and concise description of what the bug is.
-->
**To Reproduce**
Steps to reproduce the behavior:
1. ...
2. ...
3. ...
**Expected behavior**
<!--
A clear and concise description of what you expected to happen.
-->
**Version (please complete the following information):**
- Image Version: <!-- e.g. v2.21.0 -->
- Docker Version: <!-- e.g. 20.10.17 -->
- Docker Compose Version (if applicable): <!-- e.g. 1.29.2 -->
**Additional context**
<!--
Add any other context about the problem here.
-->


@@ -1,28 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
<!--
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-->
**Describe the solution you'd like**
<!--
A clear and concise description of what you want to happen.
-->
**Describe alternatives you've considered**
<!--
A clear and concise description of any alternative solutions or features you've considered.
-->
**Additional context**
<!--
Add any other context or screenshots about the feature request here.
-->


@@ -1,28 +0,0 @@
---
name: Support request
about: Ask for help
title: ''
labels: ''
assignees: ''
---
**What are you trying to do?**
<!--
A clear and concise description of what you are trying to do, but cannot get working.
-->
**What is your current configuration?**
<!--
Add the full configuration you are using. Please redact out any real-world credentials.
-->
**Log output**
<!--
Provide the full log output of your setup.
-->
**Additional context**
<!--
Add any other context or screenshots about the support request here.
-->


@@ -1,21 +1,21 @@
# Copyright 2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: MPL-2.0
- FROM golang:1.19-alpine as builder
+ FROM golang:1.17-alpine as builder
WORKDIR /app
- COPY . .
+ COPY go.mod go.sum ./
+ RUN go mod download
- WORKDIR /app/cmd/backup
- RUN go build -o backup .
+ COPY cmd/backup/main.go ./cmd/backup/main.go
+ RUN go build -o backup cmd/backup/main.go
- FROM alpine:3.17
+ FROM alpine:3.14
WORKDIR /root
- RUN apk add --no-cache ca-certificates
+ RUN apk add --update ca-certificates sudo
- COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
+ COPY --from=builder /app/backup /usr/bin/backup
COPY ./entrypoint.sh /root/
RUN chmod +x entrypoint.sh

README.md: 779 changed lines (file diff suppressed because it is too large)


@@ -1,133 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
// Portions of this file are taken from package `targz`, Copyright (c) 2014 Fredrik Wallgren
// Licensed under the MIT License: https://github.com/walle/targz/blob/57fe4206da5abf7dd3901b4af3891ec2f08c7b08/LICENSE
package main
import (
"archive/tar"
"compress/gzip"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
)
func createArchive(files []string, inputFilePath, outputFilePath string) error {
inputFilePath = stripTrailingSlashes(inputFilePath)
inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
if err != nil {
return fmt.Errorf("createArchive: error transposing given file paths: %w", err)
}
if err := os.MkdirAll(filepath.Dir(outputFilePath), 0755); err != nil {
return fmt.Errorf("createArchive: error creating output file path: %w", err)
}
if err := compress(files, outputFilePath, filepath.Dir(inputFilePath)); err != nil {
return fmt.Errorf("createArchive: error creating archive: %w", err)
}
return nil
}
func stripTrailingSlashes(path string) string {
if len(path) > 0 && path[len(path)-1] == '/' {
path = path[0 : len(path)-1]
}
return path
}
func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error) {
inputFilePath, err := filepath.Abs(inputFilePath)
if err == nil {
outputFilePath, err = filepath.Abs(outputFilePath)
}
return inputFilePath, outputFilePath, err
}
func compress(paths []string, outFilePath, subPath string) error {
file, err := os.Create(outFilePath)
if err != nil {
return fmt.Errorf("compress: error creating out file: %w", err)
}
prefix := path.Dir(outFilePath)
gzipWriter := gzip.NewWriter(file)
tarWriter := tar.NewWriter(gzipWriter)
for _, p := range paths {
if err := writeTarGz(p, tarWriter, prefix); err != nil {
return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
}
}
err = tarWriter.Close()
if err != nil {
return fmt.Errorf("compress: error closing tar writer: %w", err)
}
err = gzipWriter.Close()
if err != nil {
return fmt.Errorf("compress: error closing gzip writer: %w", err)
}
err = file.Close()
if err != nil {
return fmt.Errorf("compress: error closing file: %w", err)
}
return nil
}
func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
fileInfo, err := os.Lstat(path)
if err != nil {
return fmt.Errorf("writeTarGz: error getting file infor for %s: %w", path, err)
}
if fileInfo.Mode()&os.ModeSocket == os.ModeSocket {
return nil
}
var link string
if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
var err error
if link, err = os.Readlink(path); err != nil {
return fmt.Errorf("writeTarGz: error resolving symlink %s: %w", path, err)
}
}
header, err := tar.FileInfoHeader(fileInfo, link)
if err != nil {
return fmt.Errorf("writeTarGz: error getting file info header: %w", err)
}
header.Name = strings.TrimPrefix(path, prefix)
err = tarWriter.WriteHeader(header)
if err != nil {
return fmt.Errorf("writeTarGz: error writing file info header: %w", err)
}
if !fileInfo.Mode().IsRegular() {
return nil
}
file, err := os.Open(path)
if err != nil {
return fmt.Errorf("writeTarGz: error opening %s: %w", path, err)
}
defer file.Close()
_, err = io.Copy(tarWriter, file)
if err != nil {
return fmt.Errorf("writeTarGz: error copying %s to tar writer: %w", path, err)
}
return nil
}
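
For illustration (not part of the diff), a minimal standalone sketch of the technique the file above implements: walk a tree, build tar headers via tar.FileInfoHeader, strip the parent prefix from entry names, and stream contents through a gzip-wrapped tar writer. Paths are hypothetical, and the symlink/socket special-casing of the full version is omitted:

package main

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	src, dst := "/backup", "/tmp/backup.tar.gz" // hypothetical paths
	out, err := os.Create(dst)
	if err != nil {
		panic(err)
	}
	defer out.Close()
	gz := gzip.NewWriter(out)
	defer gz.Close()
	tw := tar.NewWriter(gz)
	defer tw.Close()
	// Entries are stored relative to the parent directory, mirroring the
	// strings.TrimPrefix(path, prefix) logic above.
	prefix := filepath.Dir(src)
	if err := filepath.WalkDir(src, func(p string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		if !info.Mode().IsRegular() && !info.IsDir() {
			return nil // skip symlinks, sockets etc. in this sketch
		}
		hdr, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		hdr.Name = strings.TrimPrefix(p, prefix)
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		f, err := os.Open(p)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	}); err != nil {
		panic(err)
	}
}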


@@ -1,119 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"os"
"regexp"
"time"
)
// Config holds all configuration values that are expected to be set
// by users.
type Config struct {
AwsS3BucketName string `split_words:"true"`
AwsS3Path string `split_words:"true"`
AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"`
AwsEndpointProto string `split_words:"true" default:"https"`
AwsEndpointInsecure bool `split_words:"true"`
AwsEndpointCACert CertDecoder `envconfig:"AWS_ENDPOINT_CA_CERT"`
AwsStorageClass string `split_words:"true"`
AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
AwsAccessKeyIDFile string `envconfig:"AWS_ACCESS_KEY_ID_FILE"`
AwsSecretAccessKey string `split_words:"true"`
AwsSecretAccessKeyFile string `split_words:"true"`
AwsIamRoleEndpoint string `split_words:"true"`
BackupSources string `split_words:"true" default:"/backup"`
BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
BackupFilenameExpand bool `split_words:"true"`
BackupLatestSymlink string `split_words:"true"`
BackupArchive string `split_words:"true" default:"/archive"`
BackupRetentionDays int32 `split_words:"true" default:"-1"`
BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
BackupPruningPrefix string `split_words:"true"`
BackupStopContainerLabel string `split_words:"true" default:"true"`
BackupFromSnapshot bool `split_words:"true"`
BackupExcludeRegexp RegexpDecoder `split_words:"true"`
GpgPassphrase string `split_words:"true"`
NotificationURLs []string `envconfig:"NOTIFICATION_URLS"`
NotificationLevel string `split_words:"true" default:"error"`
EmailNotificationRecipient string `split_words:"true"`
EmailNotificationSender string `split_words:"true" default:"noreply@nohost"`
EmailSMTPHost string `envconfig:"EMAIL_SMTP_HOST"`
EmailSMTPPort int `envconfig:"EMAIL_SMTP_PORT" default:"587"`
EmailSMTPUsername string `envconfig:"EMAIL_SMTP_USERNAME"`
EmailSMTPPassword string `envconfig:"EMAIL_SMTP_PASSWORD"`
WebdavUrl string `split_words:"true"`
WebdavUrlInsecure bool `split_words:"true"`
WebdavPath string `split_words:"true" default:"/"`
WebdavUsername string `split_words:"true"`
WebdavPassword string `split_words:"true"`
SSHHostName string `split_words:"true"`
SSHPort string `split_words:"true" default:"22"`
SSHUser string `split_words:"true"`
SSHPassword string `split_words:"true"`
SSHIdentityFile string `split_words:"true" default:"/root/.ssh/id_rsa"`
SSHIdentityPassphrase string `split_words:"true"`
SSHRemotePath string `split_words:"true"`
ExecLabel string `split_words:"true"`
ExecForwardOutput bool `split_words:"true"`
LockTimeout time.Duration `split_words:"true" default:"60m"`
AzureStorageAccountName string `split_words:"true"`
AzureStoragePrimaryAccountKey string `split_words:"true"`
AzureStorageContainerName string `split_words:"true"`
AzureStoragePath string `split_words:"true"`
AzureStorageEndpoint string `split_words:"true" default:"https://{{ .AccountName }}.blob.core.windows.net/"`
}
func (c *Config) resolveSecret(envVar string, secretPath string) (string, error) {
if secretPath == "" {
return envVar, nil
}
data, err := os.ReadFile(secretPath)
if err != nil {
return "", fmt.Errorf("resolveSecret: error reading secret path: %w", err)
}
return string(data), nil
}
type CertDecoder struct {
Cert *x509.Certificate
}
func (c *CertDecoder) Decode(v string) error {
if v == "" {
return nil
}
content, err := ioutil.ReadFile(v)
if err != nil {
content = []byte(v)
}
block, _ := pem.Decode(content)
if block == nil {
return fmt.Errorf("config: error decoding certificate PEM")
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return fmt.Errorf("config: error parsing certificate: %w", err)
}
*c = CertDecoder{Cert: cert}
return nil
}
type RegexpDecoder struct {
Re *regexp.Regexp
}
func (r *RegexpDecoder) Decode(v string) error {
if v == "" {
return nil
}
re, err := regexp.Compile(v)
if err != nil {
return fmt.Errorf("config: error compiling given regexp `%s`: %w", v, err)
}
*r = RegexpDecoder{Re: re}
return nil
}
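
Not part of the diff, but for illustration: the Config struct above is populated entirely from environment variables by envconfig, with custom field types such as RegexpDecoder implementing the Decode hook. A minimal runnable sketch of that pattern, with hypothetical names:

package main

import (
	"fmt"
	"os"
	"regexp"

	"github.com/kelseyhightower/envconfig"
)

// ExcludeDecoder mirrors the RegexpDecoder above: envconfig calls Decode
// with the raw env var value, letting bad input fail fast at startup.
type ExcludeDecoder struct{ Re *regexp.Regexp }

func (d *ExcludeDecoder) Decode(v string) error {
	if v == "" {
		return nil
	}
	re, err := regexp.Compile(v)
	if err != nil {
		return fmt.Errorf("invalid regexp %q: %w", v, err)
	}
	d.Re = re
	return nil
}

type demoConfig struct {
	BackupSources       string         `split_words:"true" default:"/backup"`
	BackupExcludeRegexp ExcludeDecoder `split_words:"true"`
}

func main() {
	os.Setenv("BACKUP_EXCLUDE_REGEXP", `\.log$`)
	var c demoConfig
	if err := envconfig.Process("", &c); err != nil {
		panic(err)
	}
	// Prints "/backup true": the default applied, and the decoded regexp matching.
	fmt.Println(c.BackupSources, c.BackupExcludeRegexp.Re.MatchString("app.log"))
}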


@@ -1,203 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
// Portions of this file are taken and adapted from `moby`, Copyright 2012-2017 Docker, Inc.
// Licensed under the Apache 2.0 License: https://github.com/moby/moby/blob/8e610b2b55bfd1bfa9436ab110d311f5e8a74dcb/LICENSE
package main
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/cosiner/argv"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/stdcopy"
"golang.org/x/sync/errgroup"
)
func (s *script) exec(containerRef string, command string) ([]byte, []byte, error) {
args, _ := argv.Argv(command, nil, nil)
commandEnv := []string{
fmt.Sprintf("COMMAND_RUNTIME_ARCHIVE_FILEPATH=%s", s.file),
}
execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, types.ExecConfig{
Cmd: args[0],
AttachStdin: true,
AttachStderr: true,
Env: commandEnv,
})
if err != nil {
return nil, nil, fmt.Errorf("exec: error creating container exec: %w", err)
}
resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, types.ExecStartCheck{})
if err != nil {
return nil, nil, fmt.Errorf("exec: error attaching container exec: %w", err)
}
defer resp.Close()
var outBuf, errBuf bytes.Buffer
outputDone := make(chan error)
go func() {
_, err := stdcopy.StdCopy(&outBuf, &errBuf, resp.Reader)
outputDone <- err
}()
select {
case err := <-outputDone:
if err != nil {
return nil, nil, fmt.Errorf("exec: error demultiplexing output: %w", err)
}
break
}
stdout, err := ioutil.ReadAll(&outBuf)
if err != nil {
return nil, nil, fmt.Errorf("exec: error reading stdout: %w", err)
}
stderr, err := ioutil.ReadAll(&errBuf)
if err != nil {
return nil, nil, fmt.Errorf("exec: error reading stderr: %w", err)
}
res, err := s.cli.ContainerExecInspect(context.Background(), execID.ID)
if err != nil {
return nil, nil, fmt.Errorf("exec: error inspecting container exec: %w", err)
}
if res.ExitCode > 0 {
return stdout, stderr, fmt.Errorf("exec: running command exited %d", res.ExitCode)
}
return stdout, stderr, nil
}
func (s *script) runLabeledCommands(label string) error {
f := []filters.KeyValuePair{
{Key: "label", Value: label},
}
if s.c.ExecLabel != "" {
f = append(f, filters.KeyValuePair{
Key: "label",
Value: fmt.Sprintf("docker-volume-backup.exec-label=%s", s.c.ExecLabel),
})
}
containersWithCommand, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
Filters: filters.NewArgs(f...),
})
if err != nil {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
var hasDeprecatedContainers bool
if label == "docker-volume-backup.archive-pre" {
f[0] = filters.KeyValuePair{
Key: "label",
Value: "docker-volume-backup.exec-pre",
}
deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
Filters: filters.NewArgs(f...),
})
if err != nil {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
if len(deprecatedContainers) != 0 {
hasDeprecatedContainers = true
containersWithCommand = append(containersWithCommand, deprecatedContainers...)
}
}
if label == "docker-volume-backup.archive-post" {
f[0] = filters.KeyValuePair{
Key: "label",
Value: "docker-volume-backup.exec-post",
}
deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
Filters: filters.NewArgs(f...),
})
if err != nil {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
if len(deprecatedContainers) != 0 {
hasDeprecatedContainers = true
containersWithCommand = append(containersWithCommand, deprecatedContainers...)
}
}
if len(containersWithCommand) == 0 {
return nil
}
if hasDeprecatedContainers {
s.logger.Warn(
"Using `docker-volume-backup.exec-pre` and `docker-volume-backup.exec-post` labels has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use other `-pre` and `-post` labels instead. Refer to the README for an upgrade guide.",
)
}
g := new(errgroup.Group)
for _, container := range containersWithCommand {
c := container
g.Go(func() error {
cmd, ok := c.Labels[label]
if !ok && label == "docker-volume-backup.archive-pre" {
cmd, _ = c.Labels["docker-volume-backup.exec-pre"]
} else if !ok && label == "docker-volume-backup.archive-post" {
cmd, _ = c.Labels["docker-volume-backup.exec-post"]
}
s.logger.Infof("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/"))
stdout, stderr, err := s.exec(c.ID, cmd)
if s.c.ExecForwardOutput {
os.Stderr.Write(stderr)
os.Stdout.Write(stdout)
}
if err != nil {
return fmt.Errorf("runLabeledCommands: error executing command: %w", err)
}
return nil
})
}
if err := g.Wait(); err != nil {
return fmt.Errorf("runLabeledCommands: error from errgroup: %w", err)
}
return nil
}
type lifecyclePhase string
const (
lifecyclePhaseArchive lifecyclePhase = "archive"
lifecyclePhaseProcess lifecyclePhase = "process"
lifecyclePhaseCopy lifecyclePhase = "copy"
lifecyclePhasePrune lifecyclePhase = "prune"
)
func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func() error {
if s.cli == nil {
return cb
}
return func() error {
if err := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
return fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
}
defer func() {
s.must(s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)))
}()
return cb()
}
}
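
For illustration (not part of the diff): the attached exec output above arrives as a single multiplexed stream, which stdcopy.StdCopy splits back into stdout and stderr. A minimal roundtrip showing the same demultiplexing:

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Build a multiplexed stream the way the Docker daemon does when no TTY
	// is allocated; exec.go above demultiplexes exactly such a stream.
	var mux bytes.Buffer
	stdcopy.NewStdWriter(&mux, stdcopy.Stdout).Write([]byte("from stdout\n"))
	stdcopy.NewStdWriter(&mux, stdcopy.Stderr).Write([]byte("from stderr\n"))

	var outBuf, errBuf bytes.Buffer
	if _, err := stdcopy.StdCopy(&outBuf, &errBuf, &mux); err != nil {
		panic(err)
	}
	fmt.Print("stdout: ", outBuf.String())
	fmt.Print("stderr: ", errBuf.String())
}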


@@ -1,58 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"fmt"
"sort"
"github.com/offen/docker-volume-backup/internal/utilities"
)
// hook contains a queued action that is triggered when the script
// reaches a certain point (e.g. unsuccessful backup)
type hook struct {
level hookLevel
action func(err error) error
}
type hookLevel int
const (
hookLevelPlumbing hookLevel = iota
hookLevelError
hookLevelInfo
)
var hookLevels = map[string]hookLevel{
"info": hookLevelInfo,
"error": hookLevelError,
}
// registerHook adds the given action at the given level.
func (s *script) registerHook(level hookLevel, action func(err error) error) {
s.hooks = append(s.hooks, hook{level, action})
}
// runHooks runs all hooks that have been registered using the
// given levels in the defined ordering. In case executing a hook returns an
// error, the following hooks will still be run before the function returns.
func (s *script) runHooks(err error) error {
sort.SliceStable(s.hooks, func(i, j int) bool {
return s.hooks[i].level < s.hooks[j].level
})
var actionErrors []error
for _, hook := range s.hooks {
if hook.level > s.hookLevel {
continue
}
if actionErr := hook.action(err); actionErr != nil {
actionErrors = append(actionErrors, fmt.Errorf("runHooks: error running hook: %w", actionErr))
}
}
if len(actionErrors) != 0 {
return utilities.Join(actionErrors...)
}
return nil
}
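
For illustration (not part of the diff), a standalone sketch of the level-gated dispatch that registerHook/runHooks implement above: hooks are sorted by level, and anything above the configured notification level is skipped:

package main

import (
	"fmt"
	"sort"
)

type hookLevel int

const (
	hookLevelPlumbing hookLevel = iota
	hookLevelError
	hookLevelInfo
)

type hook struct {
	level  hookLevel
	action func(err error) error
}

func main() {
	configured := hookLevelError // e.g. NOTIFICATION_LEVEL=error
	hooks := []hook{
		{hookLevelInfo, func(error) error { fmt.Println("success notification"); return nil }},
		{hookLevelPlumbing, func(error) error { fmt.Println("cleanup"); return nil }},
		{hookLevelError, func(error) error { fmt.Println("failure notification"); return nil }},
	}
	// Stable sort puts plumbing first, then error, then info, as runHooks does.
	sort.SliceStable(hooks, func(i, j int) bool { return hooks[i].level < hooks[j].level })
	for _, h := range hooks {
		if h.level > configured {
			continue // the info-level hook is skipped at NOTIFICATION_LEVEL=error
		}
		h.action(nil) // prints "cleanup", then "failure notification"
	}
}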


@@ -1,58 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"errors"
"fmt"
"time"
"github.com/gofrs/flock"
)
// lock opens a lockfile at the given location, keeping it locked until the
// caller invokes the returned release func. In case the lock is currently held
// by another execution, it will repeatedly retry until the lock is available
// or the given timeout is exceeded.
func (s *script) lock(lockfile string) (func() error, error) {
start := time.Now()
defer func() {
s.stats.LockedTime = time.Now().Sub(start)
}()
retry := time.NewTicker(5 * time.Second)
defer retry.Stop()
deadline := time.NewTimer(s.c.LockTimeout)
defer deadline.Stop()
fileLock := flock.New(lockfile)
for {
acquired, err := fileLock.TryLock()
if err != nil {
return noop, fmt.Errorf("lock: error trying to lock: %w", err)
}
if acquired {
if s.encounteredLock {
s.logger.Info("Acquired exclusive lock on subsequent attempt, ready to continue.")
}
return fileLock.Unlock, nil
}
if !s.encounteredLock {
s.logger.Infof(
"Exclusive lock was not available on first attempt. Will retry until it becomes available or the timeout of %s is exceeded.",
s.c.LockTimeout,
)
s.encounteredLock = true
}
select {
case <-retry.C:
continue
case <-deadline.C:
return noop, errors.New("lock: timed out waiting for lockfile to become available")
}
}
}
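
For illustration (not part of the diff), a compressed standalone sketch of the same ticker-plus-deadline pattern, with a short hypothetical timeout in place of LOCK_TIMEOUT:

package main

import (
	"fmt"
	"time"

	"github.com/gofrs/flock"
)

func main() {
	fileLock := flock.New("/tmp/dockervolumebackup.lock")
	retry := time.NewTicker(100 * time.Millisecond)
	defer retry.Stop()
	deadline := time.NewTimer(2 * time.Second)
	defer deadline.Stop()
	for {
		acquired, err := fileLock.TryLock()
		if err != nil {
			panic(err)
		}
		if acquired {
			defer fileLock.Unlock()
			fmt.Println("lock acquired")
			return
		}
		select {
		case <-retry.C: // another run holds the lock; try again shortly
		case <-deadline.C:
			panic("timed out waiting for lockfile to become available")
		}
	}
}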


@@ -1,26 +1,50 @@
- // Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
+ // Copyright 2021 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/go-gomail/gomail"
"github.com/gofrs/flock"
"github.com/kelseyhightower/envconfig"
"github.com/leekchan/timeutil"
"github.com/m90/targz"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/otiai10/copy"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/openpgp"
)
func main() {
+ unlock := lock("/var/lock/dockervolumebackup.lock")
+ defer unlock()
s, err := newScript()
if err != nil {
panic(err)
}
- unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
- defer unlock()
- s.must(err)
defer func() {
if pArg := recover(); pArg != nil {
if err, ok := pArg.(error); ok {
- if hookErr := s.runHooks(err); hookErr != nil {
+ if hookErr := s.runHooks(err, hookLevelCleanup, hookLevelFailure); hookErr != nil {
s.logger.Errorf("An error occurred calling the registered hooks: %s", hookErr)
}
os.Exit(1)
@@ -28,7 +52,7 @@ func main() {
panic(pArg)
}
- if err := s.runHooks(nil); err != nil {
+ if err := s.runHooks(nil, hookLevelCleanup); err != nil {
s.logger.Errorf(
"Backup procedure ran successfully, but an error ocurred calling the registered hooks: %v",
err,
@@ -38,7 +62,7 @@ func main() {
s.logger.Info("Finished running backup tasks.")
}()
- s.must(s.withLabeledCommands(lifecyclePhaseArchive, func() error {
+ s.must(func() error {
restartContainers, err := s.stopContainers()
// The mechanism for restarting containers is not using hooks as it
// should happen as soon as possible (i.e. before uploading backups or
@@ -49,10 +73,669 @@ func main() {
if err != nil {
return err
}
- return s.createArchive()
- })())
+ return s.takeBackup()
+ }())
- s.must(s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)())
- s.must(s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)())
- s.must(s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)())
+ s.must(s.encryptBackup())
+ s.must(s.copyBackup())
+ s.must(s.pruneOldBackups())
}
// script holds all the stateful information required to orchestrate a
// single backup run.
type script struct {
cli *client.Client
mc *minio.Client
logger *logrus.Logger
hooks []hook
start time.Time
file string
output *bytes.Buffer
c *config
}
type config struct {
BackupSources string `split_words:"true" default:"/backup"`
BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
BackupLatestSymlink string `split_words:"true"`
BackupArchive string `split_words:"true" default:"/archive"`
BackupRetentionDays int32 `split_words:"true" default:"-1"`
BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
BackupPruningPrefix string `split_words:"true"`
BackupStopContainerLabel string `split_words:"true" default:"true"`
BackupFromSnapshot bool `split_words:"true"`
BackupUID int `split_words:"true" default:"-1"`
BackupGID int `split_words:"true" default:"-1"`
AwsS3BucketName string `split_words:"true"`
AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"`
AwsEndpointProto string `split_words:"true" default:"https"`
AwsEndpointInsecure bool `split_words:"true"`
AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
AwsSecretAccessKey string `split_words:"true"`
AwsIamRoleEndpoint string `split_words:"true"`
GpgPassphrase string `split_words:"true"`
EmailNotificationRecipient string `split_words:"true"`
EmailNotificationSender string `split_words:"true" default:"noreply@nohost"`
EmailSMTPHost string `envconfig:"EMAIL_SMTP_HOST"`
EmailSMTPPort int `envconfig:"EMAIL_SMTP_PORT" default:"587"`
EmailSMTPUsername string `envconfig:"EMAIL_SMTP_USERNAME"`
EmailSMTPPassword string `envconfig:"EMAIL_SMTP_PASSWORD"`
}
var msgBackupFailed = "backup run failed"
// newScript creates all resources needed for the script to perform actions against
// remote resources like the Docker engine or remote storage locations. All
// reading from env vars or other configuration sources is expected to happen
// in this method.
func newScript() (*script, error) {
stdOut, logBuffer := buffer(os.Stdout)
s := &script{
c: &config{},
logger: &logrus.Logger{
Out: stdOut,
Formatter: new(logrus.TextFormatter),
Hooks: make(logrus.LevelHooks),
Level: logrus.InfoLevel,
},
start: time.Now(),
output: logBuffer,
}
if err := envconfig.Process("", s.c); err != nil {
return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
}
s.file = path.Join("/tmp", s.c.BackupFilename)
_, err := os.Stat("/var/run/docker.sock")
if !os.IsNotExist(err) {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, fmt.Errorf("newScript: failed to create docker client")
}
s.cli = cli
}
if s.c.AwsS3BucketName != "" {
var creds *credentials.Credentials
if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" {
creds = credentials.NewStaticV4(
s.c.AwsAccessKeyID,
s.c.AwsSecretAccessKey,
"",
)
} else if s.c.AwsIamRoleEndpoint != "" {
creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint)
} else {
return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
}
options := minio.Options{
Creds: creds,
Secure: s.c.AwsEndpointProto == "https",
}
if s.c.AwsEndpointInsecure {
if !options.Secure {
return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
}
transport, err := minio.DefaultTransport(true)
if err != nil {
return nil, fmt.Errorf("newScript: failed to create default minio transport")
}
transport.TLSClientConfig.InsecureSkipVerify = true
options.Transport = transport
}
mc, err := minio.New(s.c.AwsEndpoint, &options)
if err != nil {
return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
}
s.mc = mc
}
if s.c.EmailNotificationRecipient != "" {
s.hooks = append(s.hooks, hook{hookLevelFailure, func(err error, start time.Time, logOutput string) error {
mailer := gomail.NewDialer(
s.c.EmailSMTPHost, s.c.EmailSMTPPort, s.c.EmailSMTPUsername, s.c.EmailSMTPPassword,
)
subject := fmt.Sprintf(
"Failure running docker-volume-backup at %s", start.Format(time.RFC3339),
)
body := fmt.Sprintf(
"Running docker-volume-backup failed with error: %s\n\nLog output of the failed run was:\n\n%s\n", err, logOutput,
)
message := gomail.NewMessage()
message.SetHeader("From", s.c.EmailNotificationSender)
message.SetHeader("To", s.c.EmailNotificationRecipient)
message.SetHeader("Subject", subject)
message.SetBody("text/plain", body)
return mailer.DialAndSend(message)
}})
}
return s, nil
}
var noop = func() error { return nil }
// registerHook adds the given action at the given level.
func (s *script) registerHook(level hookLevel, action func(err error, start time.Time, logOutput string) error) {
s.hooks = append(s.hooks, hook{level, action})
}
// stopContainers stops all Docker containers that are marked to be
// stopped during the backup and returns a function that can be called to
// restart everything that has been stopped.
func (s *script) stopContainers() (func() error, error) {
if s.cli == nil {
return noop, nil
}
allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
})
if err != nil {
return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
}
containerLabel := fmt.Sprintf(
"docker-volume-backup.stop-during-backup=%s",
s.c.BackupStopContainerLabel,
)
containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
Filters: filters.NewArgs(filters.KeyValuePair{
Key: "label",
Value: containerLabel,
}),
})
if err != nil {
return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
}
if len(containersToStop) == 0 {
return noop, nil
}
s.logger.Infof(
"Stopping %d container(s) labeled `%s` out of %d running container(s).",
len(containersToStop),
containerLabel,
len(allContainers),
)
var stoppedContainers []types.Container
var stopErrors []error
for _, container := range containersToStop {
if err := s.cli.ContainerStop(context.Background(), container.ID, nil); err != nil {
stopErrors = append(stopErrors, err)
} else {
stoppedContainers = append(stoppedContainers, container)
}
}
var stopError error
if len(stopErrors) != 0 {
stopError = fmt.Errorf(
"stopContainersAndRun: %d error(s) stopping containers: %w",
len(stopErrors),
join(stopErrors...),
)
}
return func() error {
servicesRequiringUpdate := map[string]struct{}{}
var restartErrors []error
for _, container := range stoppedContainers {
if swarmServiceName, ok := container.Labels["com.docker.swarm.service.name"]; ok {
servicesRequiringUpdate[swarmServiceName] = struct{}{}
continue
}
if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
restartErrors = append(restartErrors, err)
}
}
if len(servicesRequiringUpdate) != 0 {
services, _ := s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
for serviceName := range servicesRequiringUpdate {
var serviceMatch swarm.Service
for _, service := range services {
if service.Spec.Name == serviceName {
serviceMatch = service
break
}
}
if serviceMatch.ID == "" {
return fmt.Errorf("stopContainersAndRun: couldn't find service with name %s", serviceName)
}
serviceMatch.Spec.TaskTemplate.ForceUpdate = 1
if _, err := s.cli.ServiceUpdate(
context.Background(), serviceMatch.ID,
serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
); err != nil {
restartErrors = append(restartErrors, err)
}
}
}
if len(restartErrors) != 0 {
return fmt.Errorf(
"stopContainersAndRun: %d error(s) restarting containers and services: %w",
len(restartErrors),
join(restartErrors...),
)
}
s.logger.Infof(
"Restarted %d container(s) and the matching service(s).",
len(stoppedContainers),
)
return nil
}, stopError
}
// takeBackup creates a tar archive of the configured backup location and
// saves it to disk.
func (s *script) takeBackup() error {
s.file = timeutil.Strftime(&s.start, s.file)
backupSources := s.c.BackupSources
if s.c.BackupFromSnapshot {
backupSources = filepath.Join("/tmp", s.c.BackupSources)
// Copy before compressing to guard against a situation where the backup folder's contents are still growing.
s.registerHook(hookLevelCleanup, func(error, time.Time, string) error {
if err := remove(backupSources); err != nil {
return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
}
s.logger.Infof("Removed snapshot `%s`.", backupSources)
return nil
})
if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
PreserveTimes: true,
PreserveOwner: true,
}); err != nil {
return fmt.Errorf("takeBackup: error creating snapshot: %w", err)
}
s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
}
tarFile := s.file
s.registerHook(hookLevelCleanup, func(error, time.Time, string) error {
if err := remove(tarFile); err != nil {
return fmt.Errorf("takeBackup: error removing tar file: %w", err)
}
s.logger.Infof("Removed tar file `%s`.", tarFile)
return nil
})
if err := targz.Compress(backupSources, tarFile); err != nil {
return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
}
s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
return nil
}
// encryptBackup encrypts the backup file using PGP and the configured passphrase.
// In case no passphrase is given it returns early, leaving the backup file
// untouched.
func (s *script) encryptBackup() error {
if s.c.GpgPassphrase == "" {
return nil
}
gpgFile := fmt.Sprintf("%s.gpg", s.file)
s.registerHook(hookLevelCleanup, func(error, time.Time, string) error {
if err := remove(gpgFile); err != nil {
return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
}
s.logger.Infof("Removed GPG file `%s`.", gpgFile)
return nil
})
outFile, err := os.Create(gpgFile)
defer outFile.Close()
if err != nil {
return fmt.Errorf("encryptBackup: error opening out file: %w", err)
}
_, name := path.Split(s.file)
dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
IsBinary: true,
FileName: name,
}, nil)
defer dst.Close()
if err != nil {
return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
}
src, err := os.Open(s.file)
if err != nil {
return fmt.Errorf("encryptBackup: error opening backup file `%s`: %w", s.file, err)
}
if _, err := io.Copy(dst, src); err != nil {
return fmt.Errorf("encryptBackup: error writing ciphertext to file: %w", err)
}
s.file = gpgFile
s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
return nil
}
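// Note (illustration, not part of the diff): the output of encryptBackup is a
// standard symmetrically encrypted OpenPGP message, so assuming GnuPG is
// available it can be restored with `gpg -d backup.tar.gz.gpg > backup.tar.gz`
// and the configured GPG_PASSPHRASE.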
// copyBackup makes sure the backup file is copied to both local and remote locations
// as per the given configuration.
func (s *script) copyBackup() error {
_, name := path.Split(s.file)
if s.mc != nil {
if _, err := s.mc.FPutObject(context.Background(), s.c.AwsS3BucketName, name, s.file, minio.PutObjectOptions{
ContentType: "application/tar+gzip",
}); err != nil {
return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
}
s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName)
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
if err := os.Chown(s.file, s.c.BackupUID, s.c.BackupGID); err != nil {
return fmt.Errorf("copyBackup: error changing owner on temp file: %w", err)
}
if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil {
return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
}
s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive)
if s.c.BackupLatestSymlink != "" {
symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink)
if _, err := os.Lstat(symlink); err == nil {
os.Remove(symlink)
}
if err := os.Symlink(name, symlink); err != nil {
return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
}
s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink)
}
}
return nil
}
// pruneOldBackups rotates away backups from local and remote storages using
// the given configuration. In case the given configuration would delete all
// backups, it does nothing instead.
func (s *script) pruneOldBackups() error {
if s.c.BackupRetentionDays < 0 {
return nil
}
if s.c.BackupPruningLeeway != 0 {
s.logger.Infof("Sleeping for %s before pruning backups.", s.c.BackupPruningLeeway)
time.Sleep(s.c.BackupPruningLeeway)
}
deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays))
if s.mc != nil {
candidates := s.mc.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{
WithMetadata: true,
Prefix: s.c.BackupPruningPrefix,
})
var matches []minio.ObjectInfo
var lenCandidates int
for candidate := range candidates {
lenCandidates++
if candidate.Err != nil {
return fmt.Errorf(
"pruneOldBackups: error looking up candidates from remote storage: %w",
candidate.Err,
)
}
if candidate.LastModified.Before(deadline) {
matches = append(matches, candidate)
}
}
if len(matches) != 0 && len(matches) != lenCandidates {
objectsCh := make(chan minio.ObjectInfo)
go func() {
for _, match := range matches {
objectsCh <- match
}
close(objectsCh)
}()
errChan := s.mc.RemoveObjects(context.Background(), s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{})
var removeErrors []error
for result := range errChan {
if result.Err != nil {
removeErrors = append(removeErrors, result.Err)
}
}
if len(removeErrors) != 0 {
return fmt.Errorf(
"pruneOldBackups: %d error(s) removing files from remote storage: %w",
len(removeErrors),
join(removeErrors...),
)
}
s.logger.Infof(
"Pruned %d out of %d remote backup(s) as their age exceeded the configured retention period of %d days.",
len(matches),
lenCandidates,
s.c.BackupRetentionDays,
)
} else if len(matches) != 0 && len(matches) == lenCandidates {
s.logger.Warnf(
"The current configuration would delete all %d remote backup copies.",
len(matches),
)
s.logger.Warn("Refusing to do so, please check your configuration.")
} else {
s.logger.Infof("None of %d remote backup(s) were pruned.", lenCandidates)
}
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
globPattern := path.Join(
s.c.BackupArchive,
fmt.Sprintf("%s*", s.c.BackupPruningPrefix),
)
globMatches, err := filepath.Glob(globPattern)
if err != nil {
return fmt.Errorf(
"pruneOldBackups: error looking up matching files using pattern %s: %w",
globPattern,
err,
)
}
var candidates []string
for _, candidate := range globMatches {
fi, err := os.Lstat(candidate)
if err != nil {
return fmt.Errorf(
"pruneOldBackups: error calling Lstat on file %s: %w",
candidate,
err,
)
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
candidates = append(candidates, candidate)
}
}
var matches []string
for _, candidate := range candidates {
fi, err := os.Stat(candidate)
if err != nil {
return fmt.Errorf(
"pruneOldBackups: error calling stat on file %s: %w",
candidate,
err,
)
}
if fi.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
if len(matches) != 0 && len(matches) != len(candidates) {
var removeErrors []error
for _, match := range matches {
if err := os.Remove(match); err != nil {
removeErrors = append(removeErrors, err)
}
}
if len(removeErrors) != 0 {
return fmt.Errorf(
"pruneOldBackups: %d error(s) deleting local files, starting with: %w",
len(removeErrors),
join(removeErrors...),
)
}
s.logger.Infof(
"Pruned %d out of %d local backup(s) as their age exceeded the configured retention period of %d days.",
len(matches),
len(candidates),
s.c.BackupRetentionDays,
)
} else if len(matches) != 0 && len(matches) == len(candidates) {
s.logger.Warnf(
"The current configuration would delete all %d local backup copies.",
len(matches),
)
s.logger.Warn("Refusing to do so, please check your configuration.")
} else {
s.logger.Infof("None of %d local backup(s) were pruned.", len(candidates))
}
}
return nil
}
// runHooks runs all hooks that have been registered using the
// given levels in the defined ordering. In case executing a hook returns an
// error, the following hooks will still be run before the function returns.
func (s *script) runHooks(err error, levels ...hookLevel) error {
var actionErrors []error
for _, level := range levels {
for _, hook := range s.hooks {
if hook.level != level {
continue
}
if actionErr := hook.action(err, s.start, s.output.String()); actionErr != nil {
actionErrors = append(actionErrors, fmt.Errorf("runHooks: error running hook: %w", actionErr))
}
}
}
if len(actionErrors) != 0 {
return join(actionErrors...)
}
return nil
}
// must exits the script run prematurely in case the given error
// is non-nil.
func (s *script) must(err error) {
if err != nil {
s.logger.Errorf("Fatal error running backup: %s", err)
panic(err)
}
}
// remove removes the given file or directory from disk.
func remove(location string) error {
fi, err := os.Lstat(location)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
}
if fi.IsDir() {
err = os.RemoveAll(location)
} else {
err = os.Remove(location)
}
if err != nil {
return fmt.Errorf("remove: error removing `%s`: %w", location, err)
}
return nil
}
// lock opens a lockfile at the given location, keeping it locked until the
// caller invokes the returned release func. When invoked while the file is
// still locked the function panics.
func lock(lockfile string) func() error {
fileLock := flock.New(lockfile)
acquired, err := fileLock.TryLock()
if err != nil {
panic(err)
}
if !acquired {
panic("unable to acquire file lock")
}
return fileLock.Unlock
}
// copyFile copies the file located at `src` to `dst`, letting cp -p preserve
// mode, ownership and timestamps.
func copyFile(src, dst string) error {
cmd := exec.Command("cp", "-p", src, dst)
return cmd.Run()
}
// join takes a list of errors and joins them into a single error
func join(errs ...error) error {
if len(errs) == 1 {
return errs[0]
}
var msgs []string
for _, err := range errs {
if err == nil {
continue
}
msgs = append(msgs, err.Error())
}
return errors.New("[" + strings.Join(msgs, ", ") + "]")
}
// buffer takes an io.Writer and returns a wrapped version of the
// writer that writes to both the original target as well as the returned buffer
func buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
return buffering, &buffering.buf
}
type bufferingWriter struct {
buf bytes.Buffer
writer io.Writer
}
func (b *bufferingWriter) Write(p []byte) (n int, err error) {
if n, err := b.buf.Write(p); err != nil {
return n, fmt.Errorf("bufferingWriter: error writing to buffer: %w", err)
}
return b.writer.Write(p)
}
// hook contains a queued action that is triggered when the script
// reaches a certain point (e.g. unsuccessful backup)
type hook struct {
level hookLevel
action func(err error, start time.Time, logOutput string) error
}
type hookLevel int
const (
hookLevelFailure hookLevel = iota
hookLevelCleanup
)
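
The three commits under comparison revolve around the helpers above: os.Chown honors the new BACKUP_UID/BACKUP_GID options, and copyFile shells out to cp -p so the archived copy keeps mode, ownership and timestamps, which a plain io.Copy would lose. A standalone sketch of that combined flow, with hypothetical paths and IDs:

package main

import (
	"os"
	"os/exec"
)

func main() {
	src, dst := "/tmp/backup.tar.gz", "/archive/backup.tar.gz"
	uid, gid := 1000, 1000 // e.g. BACKUP_UID / BACKUP_GID
	// Chown the temp file first; this requires sufficient privileges, as the
	// container's root user has. cp -p then carries mode, ownership and
	// timestamps over to the archive copy.
	if err := os.Chown(src, uid, gid); err != nil {
		panic(err)
	}
	if err := exec.Command("cp", "-p", src, dst).Run(); err != nil {
		panic(err)
	}
}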


@@ -1,108 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"bytes"
_ "embed"
"fmt"
"os"
"text/template"
"time"
sTypes "github.com/containrrr/shoutrrr/pkg/types"
"github.com/offen/docker-volume-backup/internal/utilities"
)
//go:embed notifications.tmpl
var defaultNotifications string
// NotificationData holds the data passed to the notification templates.
type NotificationData struct {
Error error
Config *Config
Stats *Stats
}
// notify sends a notification using the given title and body templates.
// Automatically creates notification data, adding the given error
func (s *script) notify(titleTemplate string, bodyTemplate string, err error) error {
params := NotificationData{
Error: err,
Stats: s.stats,
Config: s.c,
}
titleBuf := &bytes.Buffer{}
if err := s.template.ExecuteTemplate(titleBuf, titleTemplate, params); err != nil {
return fmt.Errorf("notify: error executing %s template: %w", titleTemplate, err)
}
bodyBuf := &bytes.Buffer{}
if err := s.template.ExecuteTemplate(bodyBuf, bodyTemplate, params); err != nil {
return fmt.Errorf("notify: error executing %s template: %w", bodyTemplate, err)
}
if err := s.sendNotification(titleBuf.String(), bodyBuf.String()); err != nil {
return fmt.Errorf("notify: error notifying: %w", err)
}
return nil
}
// notifyFailure sends a notification about a failed backup run
func (s *script) notifyFailure(err error) error {
return s.notify("title_failure", "body_failure", err)
}
// notifySuccess sends a notification about a successful backup run
func (s *script) notifySuccess() error {
return s.notify("title_success", "body_success", nil)
}
// sendNotification sends a notification to all configured third party services
func (s *script) sendNotification(title, body string) error {
var errs []error
for _, result := range s.sender.Send(body, &sTypes.Params{"title": title}) {
if result != nil {
errs = append(errs, result)
}
}
if len(errs) != 0 {
return fmt.Errorf("sendNotification: error sending message: %w", utilities.Join(errs...))
}
return nil
}
var templateHelpers = template.FuncMap{
"formatTime": func(t time.Time) string {
return t.Format(time.RFC3339)
},
"formatBytesDec": func(bytes uint64) string {
return formatBytes(bytes, true)
},
"formatBytesBin": func(bytes uint64) string {
return formatBytes(bytes, false)
},
"env": os.Getenv,
}
// formatBytes converts an amount of bytes into a human-readable representation;
// the decimal parameter specifies whether to use powers of 1000 (decimal) or powers of 1024 (binary)
func formatBytes(b uint64, decimal bool) string {
unit := uint64(1024)
format := "%.1f %ciB"
if decimal {
unit = uint64(1000)
format = "%.1f %cB"
}
if b < unit {
return fmt.Sprintf("%d B", b)
}
div, exp := unit, 0
for n := b / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf(format, float64(b)/float64(div), "kMGTPE"[exp])
}
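
For a concrete sense of the two modes, assuming formatBytes from above is in scope:

fmt.Println(formatBytes(1500000, true))  // "1.5 MB"  (powers of 1000)
fmt.Println(formatBytes(1500000, false)) // "1.4 MiB" (powers of 1024)
fmt.Println(formatBytes(512, true))      // "512 B"   (below one unit)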


@@ -1,26 +0,0 @@
{{ define "title_failure" -}}
Failure running docker-volume-backup at {{ .Stats.StartTime | formatTime }}
{{- end }}
{{ define "body_failure" -}}
Running docker-volume-backup failed with error: {{ .Error }}
Log output of the failed run was:
{{ .Stats.LogOutput }}
{{- end }}
{{ define "title_success" -}}
Success running docker-volume-backup at {{ .Stats.StartTime | formatTime }}
{{- end }}
{{ define "body_success" -}}
Running docker-volume-backup succeeded.
Log output was:
{{ .Stats.LogOutput }}
{{- end }}
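
For illustration (not part of the diff), a minimal runnable sketch of how one of the named templates above is rendered; the data shape loosely mirrors NotificationData and Stats but is stubbed here, and user overrides would be parsed from /etc/dockervolumebackup/notifications.d as newScript does below:

package main

import (
	"os"
	"text/template"
	"time"
)

type stats struct {
	StartTime time.Time
	LogOutput string
}

type data struct {
	Stats *stats
}

func main() {
	tmpl := template.New("").Funcs(template.FuncMap{
		// Same helper name as templateHelpers above.
		"formatTime": func(t time.Time) string { return t.Format(time.RFC3339) },
	})
	tmpl = template.Must(tmpl.Parse(`{{ define "title_success" -}}
Success running docker-volume-backup at {{ .Stats.StartTime | formatTime }}
{{- end }}`))
	if err := tmpl.ExecuteTemplate(os.Stdout, "title_success", data{
		Stats: &stats{StartTime: time.Now()},
	}); err != nil {
		panic(err)
	}
}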


@@ -1,579 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"context"
"fmt"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"text/template"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/offen/docker-volume-backup/internal/storage/azure"
"github.com/offen/docker-volume-backup/internal/storage/local"
"github.com/offen/docker-volume-backup/internal/storage/s3"
"github.com/offen/docker-volume-backup/internal/storage/ssh"
"github.com/offen/docker-volume-backup/internal/storage/webdav"
"github.com/offen/docker-volume-backup/internal/utilities"
"github.com/containrrr/shoutrrr"
"github.com/containrrr/shoutrrr/pkg/router"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/kelseyhightower/envconfig"
"github.com/leekchan/timeutil"
"github.com/otiai10/copy"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/openpgp"
"golang.org/x/sync/errgroup"
)
// script holds all the stateful information required to orchestrate a
// single backup run.
type script struct {
cli *client.Client
storages []storage.Backend
logger *logrus.Logger
sender *router.ServiceRouter
template *template.Template
hooks []hook
hookLevel hookLevel
file string
stats *Stats
encounteredLock bool
c *Config
}
// newScript creates all resources needed for the script to perform actions against
// remote resources like the Docker engine or remote storage locations. All
// reading from env vars or other configuration sources is expected to happen
// in this method.
func newScript() (*script, error) {
stdOut, logBuffer := buffer(os.Stdout)
s := &script{
c: &Config{},
logger: &logrus.Logger{
Out: stdOut,
Formatter: new(logrus.TextFormatter),
Hooks: make(logrus.LevelHooks),
Level: logrus.InfoLevel,
},
stats: &Stats{
StartTime: time.Now(),
LogOutput: logBuffer,
Storages: map[string]StorageStats{
"S3": {},
"WebDAV": {},
"SSH": {},
"Local": {},
"Azure": {},
},
},
}
s.registerHook(hookLevelPlumbing, func(error) error {
s.stats.EndTime = time.Now()
s.stats.TookTime = s.stats.EndTime.Sub(s.stats.StartTime)
return nil
})
if err := envconfig.Process("", s.c); err != nil {
return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
}
s.file = path.Join("/tmp", s.c.BackupFilename)
if s.c.BackupFilenameExpand {
s.file = os.ExpandEnv(s.file)
s.c.BackupLatestSymlink = os.ExpandEnv(s.c.BackupLatestSymlink)
s.c.BackupPruningPrefix = os.ExpandEnv(s.c.BackupPruningPrefix)
}
s.file = timeutil.Strftime(&s.stats.StartTime, s.file)
_, err := os.Stat("/var/run/docker.sock")
_, dockerHostSet := os.LookupEnv("DOCKER_HOST")
if !os.IsNotExist(err) || dockerHostSet {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, fmt.Errorf("newScript: failed to create docker client")
}
s.cli = cli
}
logFunc := func(logType storage.LogLevel, context string, msg string, params ...interface{}) {
switch logType {
case storage.LogLevelWarning:
s.logger.Warnf("["+context+"] "+msg, params...)
case storage.LogLevelError:
s.logger.Errorf("["+context+"] "+msg, params...)
case storage.LogLevelInfo:
default:
s.logger.Infof("["+context+"] "+msg, params...)
}
}
if s.c.AwsS3BucketName != "" {
accessKeyID, err := s.c.resolveSecret(s.c.AwsAccessKeyID, s.c.AwsAccessKeyIDFile)
if err != nil {
return nil, fmt.Errorf("newScript: error resolving AwsAccessKeyID: %w", err)
}
secretAccessKey, err := s.c.resolveSecret(s.c.AwsSecretAccessKey, s.c.AwsSecretAccessKeyFile)
if err != nil {
return nil, fmt.Errorf("newScript: error resolving AwsSecretAccessKey: %w", err)
}
s3Config := s3.Config{
Endpoint: s.c.AwsEndpoint,
AccessKeyID: accessKeyID,
SecretAccessKey: secretAccessKey,
IamRoleEndpoint: s.c.AwsIamRoleEndpoint,
EndpointProto: s.c.AwsEndpointProto,
EndpointInsecure: s.c.AwsEndpointInsecure,
RemotePath: s.c.AwsS3Path,
BucketName: s.c.AwsS3BucketName,
StorageClass: s.c.AwsStorageClass,
CACert: s.c.AwsEndpointCACert.Cert,
}
if s3Backend, err := s3.NewStorageBackend(s3Config, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, s3Backend)
}
}
if s.c.WebdavUrl != "" {
webDavConfig := webdav.Config{
URL: s.c.WebdavUrl,
URLInsecure: s.c.WebdavUrlInsecure,
Username: s.c.WebdavUsername,
Password: s.c.WebdavPassword,
RemotePath: s.c.WebdavPath,
}
if webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, webdavBackend)
}
}
if s.c.SSHHostName != "" {
sshConfig := ssh.Config{
HostName: s.c.SSHHostName,
Port: s.c.SSHPort,
User: s.c.SSHUser,
Password: s.c.SSHPassword,
IdentityFile: s.c.SSHIdentityFile,
IdentityPassphrase: s.c.SSHIdentityPassphrase,
RemotePath: s.c.SSHRemotePath,
}
if sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, sshBackend)
}
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
localConfig := local.Config{
ArchivePath: s.c.BackupArchive,
LatestSymlink: s.c.BackupLatestSymlink,
}
localBackend := local.NewStorageBackend(localConfig, logFunc)
s.storages = append(s.storages, localBackend)
}
if s.c.AzureStorageAccountName != "" {
azureConfig := azure.Config{
ContainerName: s.c.AzureStorageContainerName,
AccountName: s.c.AzureStorageAccountName,
PrimaryAccountKey: s.c.AzureStoragePrimaryAccountKey,
Endpoint: s.c.AzureStorageEndpoint,
RemotePath: s.c.AzureStoragePath,
}
azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
if err != nil {
return nil, err
}
s.storages = append(s.storages, azureBackend)
}
if s.c.EmailNotificationRecipient != "" {
emailURL := fmt.Sprintf(
"smtp://%s:%s@%s:%d/?from=%s&to=%s",
s.c.EmailSMTPUsername,
s.c.EmailSMTPPassword,
s.c.EmailSMTPHost,
s.c.EmailSMTPPort,
s.c.EmailNotificationSender,
s.c.EmailNotificationRecipient,
)
s.c.NotificationURLs = append(s.c.NotificationURLs, emailURL)
s.logger.Warn(
"Using EMAIL_* keys for providing notification configuration has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use NOTIFICATION_URLS instead. Refer to the README for an upgrade guide.",
)
}
hookLevel, ok := hookLevels[s.c.NotificationLevel]
if !ok {
return nil, fmt.Errorf("newScript: unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel)
}
s.hookLevel = hookLevel
if len(s.c.NotificationURLs) > 0 {
sender, senderErr := shoutrrr.CreateSender(s.c.NotificationURLs...)
if senderErr != nil {
return nil, fmt.Errorf("newScript: error creating sender: %w", senderErr)
}
s.sender = sender
tmpl := template.New("")
tmpl.Funcs(templateHelpers)
tmpl, err = tmpl.Parse(defaultNotifications)
if err != nil {
return nil, fmt.Errorf("newScript: unable to parse default notifications templates: %w", err)
}
if fi, err := os.Stat("/etc/dockervolumebackup/notifications.d"); err == nil && fi.IsDir() {
tmpl, err = tmpl.ParseGlob("/etc/dockervolumebackup/notifications.d/*.*")
if err != nil {
return nil, fmt.Errorf("newScript: unable to parse user defined notifications templates: %w", err)
}
}
s.template = tmpl
// To prevent duplicate notifications, ensure the registered callbacks
// run mutually exclusively.
s.registerHook(hookLevelError, func(err error) error {
if err == nil {
return nil
}
return s.notifyFailure(err)
})
s.registerHook(hookLevelInfo, func(err error) error {
if err != nil {
return nil
}
return s.notifySuccess()
})
}
return s, nil
}
// stopContainers stops all Docker containers that are marked to be
// stopped during the backup and returns a function that can be called to
// restart everything that has been stopped.
func (s *script) stopContainers() (func() error, error) {
if s.cli == nil {
return noop, nil
}
allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
})
if err != nil {
return noop, fmt.Errorf("stopContainers: error querying for containers: %w", err)
}
containerLabel := fmt.Sprintf(
"docker-volume-backup.stop-during-backup=%s",
s.c.BackupStopContainerLabel,
)
containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Quiet: true,
Filters: filters.NewArgs(filters.KeyValuePair{
Key: "label",
Value: containerLabel,
}),
})
if err != nil {
return noop, fmt.Errorf("stopContainers: error querying for containers to stop: %w", err)
}
if len(containersToStop) == 0 {
return noop, nil
}
s.logger.Infof(
"Stopping %d container(s) labeled `%s` out of %d running container(s).",
len(containersToStop),
containerLabel,
len(allContainers),
)
var stoppedContainers []types.Container
var stopErrors []error
for _, container := range containersToStop {
if err := s.cli.ContainerStop(context.Background(), container.ID, nil); err != nil {
stopErrors = append(stopErrors, err)
} else {
stoppedContainers = append(stoppedContainers, container)
}
}
var stopError error
if len(stopErrors) != 0 {
stopError = fmt.Errorf(
"stopContainers: %d error(s) stopping containers: %w",
len(stopErrors),
utilities.Join(stopErrors...),
)
}
s.stats.Containers = ContainersStats{
All: uint(len(allContainers)),
ToStop: uint(len(containersToStop)),
Stopped: uint(len(stoppedContainers)),
}
return func() error {
servicesRequiringUpdate := map[string]struct{}{}
var restartErrors []error
for _, container := range stoppedContainers {
if swarmServiceName, ok := container.Labels["com.docker.swarm.service.name"]; ok {
servicesRequiringUpdate[swarmServiceName] = struct{}{}
continue
}
if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
restartErrors = append(restartErrors, err)
}
}
if len(servicesRequiringUpdate) != 0 {
services, _ := s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
for serviceName := range servicesRequiringUpdate {
var serviceMatch swarm.Service
for _, service := range services {
if service.Spec.Name == serviceName {
serviceMatch = service
break
}
}
if serviceMatch.ID == "" {
return fmt.Errorf("stopContainers: couldn't find service with name %s", serviceName)
}
serviceMatch.Spec.TaskTemplate.ForceUpdate = 1
if _, err := s.cli.ServiceUpdate(
context.Background(), serviceMatch.ID,
serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
); err != nil {
restartErrors = append(restartErrors, err)
}
}
}
if len(restartErrors) != 0 {
return fmt.Errorf(
"stopContainers: %d error(s) restarting containers and services: %w",
len(restartErrors),
utilities.Join(restartErrors...),
)
}
s.logger.Infof(
"Restarted %d container(s) and the matching service(s).",
len(stoppedContainers),
)
return nil
}, stopError
}
// createArchive creates a tar archive of the configured backup location and
// saves it to disk.
func (s *script) createArchive() error {
backupSources := s.c.BackupSources
if s.c.BackupFromSnapshot {
s.logger.Warn(
"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the README for an upgrade guide.",
)
backupSources = filepath.Join("/tmp", s.c.BackupSources)
// Copy before compressing to guard against a situation where the backup folder's contents are still growing.
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(backupSources); err != nil {
return fmt.Errorf("createArchive: error removing snapshot: %w", err)
}
s.logger.Infof("Removed snapshot `%s`.", backupSources)
return nil
})
if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
PreserveTimes: true,
PreserveOwner: true,
}); err != nil {
return fmt.Errorf("createArchive: error creating snapshot: %w", err)
}
s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
}
tarFile := s.file
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(tarFile); err != nil {
return fmt.Errorf("createArchive: error removing tar file: %w", err)
}
s.logger.Infof("Removed tar file `%s`.", tarFile)
return nil
})
backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
if err != nil {
return fmt.Errorf("createArchive: error getting absolute path: %w", err)
}
var filesEligibleForBackup []string
if err := filepath.WalkDir(backupPath, func(path string, di fs.DirEntry, err error) error {
if err != nil {
return err
}
if s.c.BackupExcludeRegexp.Re != nil && s.c.BackupExcludeRegexp.Re.MatchString(path) {
return nil
}
filesEligibleForBackup = append(filesEligibleForBackup, path)
return nil
}); err != nil {
return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
}
if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
}
s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
return nil
}
// encryptArchive encrypts the backup file using PGP and the configured passphrase.
// In case no passphrase is given it returns early, leaving the backup file
// untouched.
func (s *script) encryptArchive() error {
if s.c.GpgPassphrase == "" {
return nil
}
gpgFile := fmt.Sprintf("%s.gpg", s.file)
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(gpgFile); err != nil {
return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
}
s.logger.Infof("Removed GPG file `%s`.", gpgFile)
return nil
})
outFile, err := os.Create(gpgFile)
if err != nil {
return fmt.Errorf("encryptArchive: error opening out file: %w", err)
}
defer outFile.Close()
_, name := path.Split(s.file)
dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
IsBinary: true,
FileName: name,
}, nil)
if err != nil {
return fmt.Errorf("encryptArchive: error encrypting backup file: %w", err)
}
defer dst.Close()
src, err := os.Open(s.file)
if err != nil {
return fmt.Errorf("encryptArchive: error opening backup file `%s`: %w", s.file, err)
}
if _, err := io.Copy(dst, src); err != nil {
return fmt.Errorf("encryptArchive: error writing ciphertext to file: %w", err)
}
s.file = gpgFile
s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
return nil
}
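// decryptArchive is a minimal sketch (hypothetical, not part of the original
// file) showing how an archive produced by encryptArchive could be decrypted
// using the same openpgp package and passphrase.
func decryptArchive(in io.Reader, passphrase string) (io.Reader, error) {
md, err := openpgp.ReadMessage(in, nil, func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
// For symmetrically encrypted messages the prompt returns the passphrase.
return []byte(passphrase), nil
}, nil)
if err != nil {
return nil, fmt.Errorf("decryptArchive: error reading encrypted message: %w", err)
}
return md.UnverifiedBody, nil
}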
// copyArchive makes sure the backup file is copied to both local and remote locations
// as per the given configuration.
func (s *script) copyArchive() error {
_, name := path.Split(s.file)
if stat, err := os.Stat(s.file); err != nil {
return fmt.Errorf("copyArchive: unable to stat backup file: %w", err)
} else {
size := stat.Size()
s.stats.BackupFile = BackupFileStats{
Size: uint64(size),
Name: name,
FullPath: s.file,
}
}
eg := errgroup.Group{}
for _, backend := range s.storages {
b := backend
eg.Go(func() error {
return b.Copy(s.file)
})
}
if err := eg.Wait(); err != nil {
return fmt.Errorf("copyArchive: error copying archive: %w", err)
}
return nil
}
// pruneBackups rotates away backups from local and remote storages using
// the given configuration. In case the given configuration would delete all
// backups, it does nothing instead and logs a warning.
func (s *script) pruneBackups() error {
if s.c.BackupRetentionDays < 0 {
return nil
}
deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)
eg := errgroup.Group{}
for _, backend := range s.storages {
b := backend
eg.Go(func() error {
stats, err := b.Prune(deadline, s.c.BackupPruningPrefix)
if err != nil {
return err
}
s.stats.Lock()
s.stats.Storages[b.Name()] = StorageStats{
Total: stats.Total,
Pruned: stats.Pruned,
}
s.stats.Unlock()
return nil
})
}
if err := eg.Wait(); err != nil {
return fmt.Errorf("pruneBackups: error pruning backups: %w", err)
}
return nil
}
// must exits the script run prematurely if the given error is non-nil.
func (s *script) must(err error) {
if err != nil {
s.logger.Errorf("Fatal error running backup: %s", err)
panic(err)
}
}

View File

@@ -1,45 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"bytes"
"sync"
"time"
)
// ContainersStats stats about the docker containers
type ContainersStats struct {
All uint
ToStop uint
Stopped uint
StopErrors uint
}
// BackupFileStats stats about the created backup file
type BackupFileStats struct {
Name string
FullPath string
Size uint64
}
// StorageStats stats about the status of an archival directory
type StorageStats struct {
Total uint
Pruned uint
PruneErrors uint
}
// Stats global stats regarding script execution
type Stats struct {
sync.Mutex
StartTime time.Time
EndTime time.Time
TookTime time.Duration
LockedTime time.Duration
LogOutput *bytes.Buffer
Containers ContainersStats
BackupFile BackupFileStats
Storages map[string]StorageStats
}

View File

@@ -1,52 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"bytes"
"fmt"
"io"
"os"
)
var noop = func() error { return nil }
// remove removes the given file or directory from disk.
func remove(location string) error {
fi, err := os.Lstat(location)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
}
if fi.IsDir() {
err = os.RemoveAll(location)
} else {
err = os.Remove(location)
}
if err != nil {
return fmt.Errorf("remove: error removing `%s`: %w", location, err)
}
return nil
}
// buffer takes an io.Writer and returns a wrapped version of the
// writer that writes to both the original target and the returned buffer.
func buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
return buffering, &buffering.buf
}
type bufferingWriter struct {
buf bytes.Buffer
writer io.Writer
}
func (b *bufferingWriter) Write(p []byte) (n int, err error) {
if n, err := b.buf.Write(p); err != nil {
return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
}
return b.writer.Write(p)
}
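// exampleBufferUsage is a minimal usage sketch (hypothetical, not part of the
// original file): wrapping os.Stdout means everything written through the
// returned writer is printed as usual while also being retained for later
// inspection, e.g. for embedding the log output in notifications.
func exampleBufferUsage() {
w, logBuffer := buffer(os.Stdout)
fmt.Fprintln(w, "hello") // written to stdout and captured in logBuffer
_ = logBuffer.String() // contains "hello\n"
}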

View File

@@ -1,40 +0,0 @@
# Notification templates reference
In order to customize the title and body of notifications, you'll have to write a [go template](https://pkg.go.dev/text/template) and mount it inside the `/etc/dockervolumebackup/notifications.d/` directory.
Configuration, data about the backup run, and helper functions are passed to this template; this page documents them fully.
## Data
Here is a list of all data passed to the template:
* `Config`: this object holds the configuration that has been passed to the script. The field names are the names of the recognized environment variables converted to PascalCase (e.g. `BACKUP_STOP_CONTAINER_LABEL` becomes `BackupStopContainerLabel`)
* `Error`: the error that made the backup fail. Only available in the `title_failure` and `body_failure` templates
* `Stats`: object that holds stats regarding script execution. In case of an unsuccessful run, some information may not be available.
* `StartTime`: time when the script started execution
* `EndTime`: time when the backup completed successfully (after pruning)
* `TookTime`: amount of time the backup took to run (equal to `EndTime - StartTime`)
* `LockedTime`: amount of time it took for the backup to acquire the exclusive lock
* `LogOutput`: full log of the application
* `Containers`: object containing stats about the docker containers
* `All`: total number of containers
* `ToStop`: number of containers matched by the stop rule
* `Stopped`: number of containers successfully stopped
* `StopErrors`: number of containers that could not be stopped (equal to `ToStop - Stopped`)
* `BackupFile`: object containing information about the backup file
* `Name`: name of the backup file (e.g. `backup-2022-02-11T01-00-00.tar.gz`)
* `FullPath`: full path of the backup file (e.g. `/archive/backup-2022-02-11T01-00-00.tar.gz`)
* `Size`: size in bytes of the backup file
* `Storages`: object that holds stats about each storage
* `Local`, `S3`, `WebDAV`, `Azure` or `SSH`:
* `Total`: total number of backup files
* `Pruned`: number of backup files that were deleted due to the pruning rule
* `PruneErrors`: number of backup files that could not be pruned
## Functions
Some formatting and helper functions are also available:
* `formatTime`: formats a time object using [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339) format (e.g. `2022-02-11T01:00:00Z`)
* `formatBytesBin`: formats an amount of bytes using powers of 1024 (e.g. `7055258` bytes will be `6.7 MiB`)
* `formatBytesDec`: formats an amount of bytes using powers of 1000 (e.g. `7055258` bytes will be `7.1 MB`)
* `env`: returns the value of the environment variable of the given key if set
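As an example, a minimal custom template overriding the success notification could look like the following. The `define` names are the hooks the tool looks up; the wording and the fields used are illustrative:
{{ define "title_success" -}}
Backup succeeded on {{ formatTime .Stats.EndTime }}
{{- end }}
{{ define "body_success" -}}
Created {{ .Stats.BackupFile.Name }} ({{ formatBytesDec .Stats.BackupFile.Size }}), stopping {{ .Stats.Containers.Stopped }} of {{ .Stats.Containers.All }} container(s).
{{- end }}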

View File

@@ -5,22 +5,10 @@
set -e
if [ ! -d "/etc/dockervolumebackup/conf.d" ]; then
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
else
echo "/etc/dockervolumebackup/conf.d was found, using configuration files from this directory."
crontab -r && crontab /dev/null
for file in /etc/dockervolumebackup/conf.d/*; do
source $file
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
echo "Appending cron.d entry with expression $BACKUP_CRON_EXPRESSION and configuration file $file"
(crontab -l; echo "$BACKUP_CRON_EXPRESSION /bin/sh -c 'set -a; source $file; set +a && backup' 2>&1") | crontab -
done
fi
echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
echo "Starting cron in foreground."
crond -f -d 8
crond -f -l 8

70
go.mod
View File

@@ -1,74 +1,50 @@
module github.com/offen/docker-volume-backup
go 1.19
go 1.17
require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1
github.com/containrrr/shoutrrr v0.5.2
github.com/cosiner/argv v0.1.0
github.com/docker/docker v20.10.11+incompatible
github.com/go-gomail/gomail v0.0.0-20160411212932-81ebce5c23df
github.com/gofrs/flock v0.8.1
github.com/kelseyhightower/envconfig v1.4.0
github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
github.com/minio/minio-go/v7 v7.0.44
github.com/m90/targz v0.0.0-20210904082215-2e9a4529a615
github.com/minio/minio-go/v7 v7.0.16
github.com/otiai10/copy v1.7.0
github.com/pkg/sftp v1.13.5
github.com/sirupsen/logrus v1.9.0
github.com/studio-b12/gowebdav v0.0.0-20220128162035-c7b1ff8a5e62
golang.org/x/crypto v0.3.0
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
github.com/sirupsen/logrus v1.8.1
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
)
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/containerd/containerd v1.6.6 // indirect
github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/containerd/containerd v1.5.5 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/fatih/color v1.10.0 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/protobuf v1.5.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/mux v1.7.3 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.15.12 // indirect
github.com/klauspost/cpuid/v2 v2.2.1 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/klauspost/compress v1.13.6 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/nxadm/tail v1.4.6 // indirect
github.com/onsi/ginkgo v1.14.2 // indirect
github.com/onsi/gomega v1.10.3 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/rs/xid v1.4.0 // indirect
golang.org/x/net v0.2.0 // indirect
golang.org/x/sys v0.2.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 // indirect
google.golang.org/grpc v1.47.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
github.com/rs/xid v1.3.0 // indirect
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
golang.org/x/text v0.3.6 // indirect
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect
google.golang.org/grpc v1.33.2 // indirect
google.golang.org/protobuf v1.26.0 // indirect
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect
gopkg.in/ini.v1 v1.65.0 // indirect
)

884
go.sum

File diff suppressed because it is too large

View File

@@ -1,160 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package azure
import (
"bytes"
"context"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"text/template"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/offen/docker-volume-backup/internal/utilities"
)
type azureBlobStorage struct {
*storage.StorageBackend
client *azblob.Client
containerName string
}
// Config contains values that define the configuration of an Azure Blob Storage backend.
type Config struct {
AccountName string
ContainerName string
PrimaryAccountKey string
Endpoint string
RemotePath string
}
// NewStorageBackend creates and initializes a new Azure Blob Storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
endpointTemplate, err := template.New("endpoint").Parse(opts.Endpoint)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error parsing endpoint template: %w", err)
}
var ep bytes.Buffer
if err := endpointTemplate.Execute(&ep, opts); err != nil {
return nil, fmt.Errorf("NewStorageBackend: error executing endpoint template: %w", err)
}
normalizedEndpoint := fmt.Sprintf("%s/", strings.TrimSuffix(ep.String(), "/"))
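// As an illustration (mirroring this repository's test setup): an Endpoint
// value of "http://storage:10000/{{ .AccountName }}/" is rendered against the
// Config value, so an AccountName of "devstoreaccount1" yields
// "http://storage:10000/devstoreaccount1/".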
var client *azblob.Client
if opts.PrimaryAccountKey != "" {
cred, err := azblob.NewSharedKeyCredential(opts.AccountName, opts.PrimaryAccountKey)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating shared key Azure credential: %w", err)
}
client, err = azblob.NewClientWithSharedKeyCredential(normalizedEndpoint, cred, nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
}
} else {
cred, err := azidentity.NewManagedIdentityCredential(nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating managed identity credential: %w", err)
}
client, err = azblob.NewClient(normalizedEndpoint, cred, nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
}
}
storage := azureBlobStorage{
client: client,
containerName: opts.ContainerName,
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
}
return &storage, nil
}
// Name returns the name of the storage backend
func (b *azureBlobStorage) Name() string {
return "Azure"
}
// Copy copies the given file to the storage backend.
func (b *azureBlobStorage) Copy(file string) error {
fileReader, err := os.Open(file)
if err != nil {
return fmt.Errorf("(*azureBlobStorage).Copy: error opening file %s: %w", file, err)
}
_, err = b.client.UploadStream(
context.Background(),
b.containerName,
filepath.Join(b.DestinationPath, filepath.Base(file)),
fileReader,
nil,
)
if err != nil {
return fmt.Errorf("(*azureBlobStorage).Copy: error uploading file %s: %w", file, err)
}
return nil
}
// Prune rotates away backups according to the configuration and provided
// deadline for the Azure Blob storage backend.
func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
lookupPrefix := filepath.Join(b.DestinationPath, pruningPrefix)
pager := b.client.NewListBlobsFlatPager(b.containerName, &container.ListBlobsFlatOptions{
Prefix: &lookupPrefix,
})
var matches []string
var totalCount uint
for pager.More() {
resp, err := pager.NextPage(context.Background())
if err != nil {
return nil, fmt.Errorf("(*azureBlobStorage).Prune: error paging over blobs: %w", err)
}
for _, v := range resp.Segment.BlobItems {
totalCount++
if v.Properties.LastModified.Before(deadline) {
matches = append(matches, *v.Name)
}
}
}
stats := storage.PruneStats{
Total: totalCount,
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), int(totalCount), "Azure Blob Storage backup(s)", func() error {
wg := sync.WaitGroup{}
wg.Add(len(matches))
var mu sync.Mutex
var deleteErrors []error
for _, match := range matches {
name := match
go func() {
defer wg.Done()
_, err := b.client.DeleteBlob(context.Background(), b.containerName, name, nil)
if err != nil {
// Guard the shared slice with a mutex: multiple goroutines append concurrently.
mu.Lock()
deleteErrors = append(deleteErrors, err)
mu.Unlock()
}
}()
}
wg.Wait()
if len(deleteErrors) != 0 {
return utilities.Join(deleteErrors...)
}
return nil
}); err != nil {
return &stats, err
}
return &stats, nil
}

View File

@@ -1,160 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package local
import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/offen/docker-volume-backup/internal/utilities"
)
type localStorage struct {
*storage.StorageBackend
latestSymlink string
}
// Config allows configuration of a local storage backend.
type Config struct {
ArchivePath string
LatestSymlink string
}
// NewStorageBackend creates and initializes a new local storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) storage.Backend {
return &localStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.ArchivePath,
Log: logFunc,
},
latestSymlink: opts.LatestSymlink,
}
}
// Name returns the name of the storage backend
func (b *localStorage) Name() string {
return "Local"
}
// Copy copies the given file to the local storage backend.
func (b *localStorage) Copy(file string) error {
_, name := path.Split(file)
if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
return fmt.Errorf("(*localStorage).Copy: Error copying file to local archive: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
if b.latestSymlink != "" {
symlink := path.Join(b.DestinationPath, b.latestSymlink)
if _, err := os.Lstat(symlink); err == nil {
os.Remove(symlink)
}
if err := os.Symlink(name, symlink); err != nil {
return fmt.Errorf("(*localStorage).Copy: error creating latest symlink: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
}
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the local storage backend.
func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
globPattern := path.Join(
b.DestinationPath,
fmt.Sprintf("%s*", pruningPrefix),
)
globMatches, err := filepath.Glob(globPattern)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error looking up matching files using pattern %s: %w",
globPattern,
err,
)
}
var candidates []string
for _, candidate := range globMatches {
fi, err := os.Lstat(candidate)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error calling Lstat on file %s: %w",
candidate,
err,
)
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
candidates = append(candidates, candidate)
}
}
var matches []string
for _, candidate := range candidates {
fi, err := os.Stat(candidate)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error calling stat on file %s: %w",
candidate,
err,
)
}
if fi.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error {
var removeErrors []error
for _, match := range matches {
if err := os.Remove(match); err != nil {
removeErrors = append(removeErrors, err)
}
}
if len(removeErrors) != 0 {
return fmt.Errorf(
"(*localStorage).Prune: %d error(s) deleting local files, starting with: %w",
len(removeErrors),
utilities.Join(removeErrors...),
)
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}
// copyFile copies the file located at `src` to `dst`.
func copyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
_, err = io.Copy(out, in)
if err != nil {
out.Close()
return err
}
return out.Close()
}

View File

@@ -1,170 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package s3
import (
"context"
"crypto/x509"
"errors"
"fmt"
"path"
"path/filepath"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/offen/docker-volume-backup/internal/utilities"
)
type s3Storage struct {
*storage.StorageBackend
client *minio.Client
bucket string
storageClass string
}
// Config contains values that define the configuration of an S3 backend.
type Config struct {
Endpoint string
AccessKeyID string
SecretAccessKey string
IamRoleEndpoint string
EndpointProto string
EndpointInsecure bool
RemotePath string
BucketName string
StorageClass string
CACert *x509.Certificate
}
// NewStorageBackend creates and initializes a new S3/Minio storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
var creds *credentials.Credentials
if opts.AccessKeyID != "" && opts.SecretAccessKey != "" {
creds = credentials.NewStaticV4(
opts.AccessKeyID,
opts.SecretAccessKey,
"",
)
} else if opts.IamRoleEndpoint != "" {
creds = credentials.NewIAM(opts.IamRoleEndpoint)
} else {
return nil, errors.New("NewStorageBackend: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
}
options := minio.Options{
Creds: creds,
Secure: opts.EndpointProto == "https",
}
transport, err := minio.DefaultTransport(true)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: failed to create default minio transport: %w", err)
}
if opts.EndpointInsecure {
if !options.Secure {
return nil, errors.New("NewStorageBackend: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
}
transport.TLSClientConfig.InsecureSkipVerify = true
} else if opts.CACert != nil {
if transport.TLSClientConfig.RootCAs == nil {
transport.TLSClientConfig.RootCAs = x509.NewCertPool()
}
transport.TLSClientConfig.RootCAs.AddCert(opts.CACert)
}
options.Transport = transport
mc, err := minio.New(opts.Endpoint, &options)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error setting up minio client: %w", err)
}
return &s3Storage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: mc,
bucket: opts.BucketName,
storageClass: opts.StorageClass,
}, nil
}
// Name returns the name of the storage backend
func (v *s3Storage) Name() string {
return "S3"
}
// Copy copies the given file to the S3/Minio storage backend.
func (b *s3Storage) Copy(file string) error {
_, name := path.Split(file)
if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, minio.PutObjectOptions{
ContentType: "application/tar+gzip",
StorageClass: b.storageClass,
}); err != nil {
if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
}
return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend.
func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates := b.client.ListObjects(context.Background(), b.bucket, minio.ListObjectsOptions{
Prefix: filepath.Join(b.DestinationPath, pruningPrefix),
Recursive: true,
})
var matches []minio.ObjectInfo
var lenCandidates int
for candidate := range candidates {
lenCandidates++
if candidate.Err != nil {
return nil, fmt.Errorf(
"(*s3Storage).Prune: Error looking up candidates from remote storage! %w",
candidate.Err,
)
}
if candidate.LastModified.Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error {
objectsCh := make(chan minio.ObjectInfo)
go func() {
for _, match := range matches {
objectsCh <- match
}
close(objectsCh)
}()
errChan := b.client.RemoveObjects(context.Background(), b.bucket, objectsCh, minio.RemoveObjectsOptions{})
var removeErrors []error
for result := range errChan {
if result.Err != nil {
removeErrors = append(removeErrors, result.Err)
}
}
if len(removeErrors) != 0 {
return utilities.Join(removeErrors...)
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}

View File

@@ -1,190 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package ssh
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
type sshStorage struct {
*storage.StorageBackend
client *ssh.Client
sftpClient *sftp.Client
hostName string
}
// Config allows configuring an SSH backend.
type Config struct {
HostName string
Port string
User string
Password string
IdentityFile string
IdentityPassphrase string
RemotePath string
}
// NewStorageBackend creates and initializes a new SSH storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
var authMethods []ssh.AuthMethod
if opts.Password != "" {
authMethods = append(authMethods, ssh.Password(opts.Password))
}
if _, err := os.Stat(opts.IdentityFile); err == nil {
key, err := ioutil.ReadFile(opts.IdentityFile)
if err != nil {
return nil, errors.New("NewStorageBackend: error reading the private key")
}
var signer ssh.Signer
if opts.IdentityPassphrase != "" {
signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(opts.IdentityPassphrase))
if err != nil {
return nil, errors.New("NewStorageBackend: error parsing the encrypted private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
} else {
signer, err = ssh.ParsePrivateKey(key)
if err != nil {
return nil, errors.New("NewStorageBackend: error parsing the private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
}
}
sshClientConfig := &ssh.ClientConfig{
User: opts.User,
Auth: authMethods,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: Error creating ssh client: %w", err)
}
_, _, err = sshClient.SendRequest("keepalive", false, nil)
if err != nil {
return nil, err
}
sftpClient, err := sftp.NewClient(sshClient)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating sftp client: %w", err)
}
return &sshStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: sshClient,
sftpClient: sftpClient,
hostName: opts.HostName,
}, nil
}
// Name returns the name of the storage backend
func (b *sshStorage) Name() string {
return "SSH"
}
// Copy copies the given file to the SSH storage backend.
func (b *sshStorage) Copy(file string) error {
source, err := os.Open(file)
_, name := path.Split(file)
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error reading the file to be uploaded: %w", err)
}
defer source.Close()
destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error creating file on SSH storage: %w", err)
}
defer destination.Close()
chunk := make([]byte, 1000000)
for {
num, err := source.Read(chunk)
if err == io.EOF {
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
if tot != len(chunk[:num]) {
return errors.New("(*sshStorage).Copy: failed to write stream")
}
break
}
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
if tot != len(chunk[:num]) {
return fmt.Errorf("(*sshStorage).Copy: failed to write stream")
}
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the SSH storage backend.
func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
if err != nil {
return nil, fmt.Errorf("(*sshStorage).Prune: Error reading directory from SSH storage: %w", err)
}
var matches []string
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
continue
}
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate.Name())
}
}
stats := &storage.PruneStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error {
for _, match := range matches {
if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
return fmt.Errorf("(*sshStorage).Prune: Error removing file from SSH storage: %w", err)
}
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}

View File

@@ -1,61 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package storage
import (
"time"
)
// Backend is an interface for defining functions which all storage providers support.
type Backend interface {
Copy(file string) error
Prune(deadline time.Time, pruningPrefix string) (*PruneStats, error)
Name() string
}
// StorageBackend is a generic type of storage. It holds properties common to all storage types.
type StorageBackend struct {
DestinationPath string
RetentionDays int
Log Log
}
type LogLevel int
const (
LogLevelInfo LogLevel = iota
LogLevelWarning
LogLevelError
)
type Log func(logType LogLevel, context string, msg string, params ...interface{})
// PruneStats is a wrapper struct for returning stats after pruning
type PruneStats struct {
Total uint
Pruned uint
}
// DoPrune holds general control flow that applies to any kind of storage.
// Callers can pass in a thunk that performs the actual deletion of files.
func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
if lenMatches != 0 && lenMatches != lenCandidates {
if err := doRemoveFiles(); err != nil {
return err
}
b.Log(LogLevelInfo, context,
"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
lenMatches,
lenCandidates,
description,
b.RetentionDays,
)
} else if lenMatches != 0 && lenMatches == lenCandidates {
b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing %s.", lenMatches, description)
b.Log(LogLevelWarning, context, "Refusing to do so, please check your configuration.")
} else {
b.Log(LogLevelInfo, context, "None of %d existing %s were pruned.", lenCandidates, description)
}
return nil
}
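// As a usage sketch (the backend and helpers below are hypothetical): a storage
// backend first partitions its files into candidates and expired matches, then
// hands the actual deletion to DoPrune as a thunk:
//
// stats := &PruneStats{Total: uint(len(candidates)), Pruned: uint(len(matches))}
// if err := b.DoPrune(b.Name(), len(matches), len(candidates), "example backup(s)", func() error {
// for _, match := range matches {
// if err := removeFile(match); err != nil { // removeFile is assumed
// return err
// }
// }
// return nil
// }); err != nil {
// return stats, err
// }
// return stats, nil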

View File

@@ -1,123 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package webdav
import (
"errors"
"fmt"
"io/fs"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/studio-b12/gowebdav"
)
type webDavStorage struct {
*storage.StorageBackend
client *gowebdav.Client
url string
}
// Config allows configuring a WebDAV storage backend.
type Config struct {
URL string
RemotePath string
Username string
Password string
URLInsecure bool
}
// NewStorageBackend creates and initializes a new WebDAV storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
if opts.Username == "" || opts.Password == "" {
return nil, errors.New("NewStorageBackend: WEBDAV_URL is defined, but no credentials were provided")
} else {
webdavClient := gowebdav.NewClient(opts.URL, opts.Username, opts.Password)
if opts.URLInsecure {
defaultTransport, ok := http.DefaultTransport.(*http.Transport)
if !ok {
return nil, errors.New("NewStorageBackend: unexpected error when asserting type for http.DefaultTransport")
}
webdavTransport := defaultTransport.Clone()
webdavTransport.TLSClientConfig.InsecureSkipVerify = opts.URLInsecure
webdavClient.SetTransport(webdavTransport)
}
return &webDavStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: webdavClient,
}, nil
}
}
// Name returns the name of the storage backend
func (b *webDavStorage) Name() string {
return "WebDAV"
}
// Copy copies the given file to the WebDAV storage backend.
func (b *webDavStorage) Copy(file string) error {
_, name := path.Split(file)
if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error creating directory '%s' on WebDAV server: %w", b.DestinationPath, err)
}
r, err := os.Open(file)
if err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error opening the file to be uploaded: %w", err)
}
if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error uploading the file to WebDAV server: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to WebDAV URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the WebDAV storage backend.
func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.client.ReadDir(b.DestinationPath)
if err != nil {
return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
}
var matches []fs.FileInfo
var lenCandidates int
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
continue
}
lenCandidates++
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error {
for _, match := range matches {
if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
return fmt.Errorf("(*webDavStorage).Prune: Error removing file from WebDAV storage: %w", err)
}
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}

View File

@@ -1,24 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package utilities
import (
"errors"
"strings"
)
// Join takes a list of errors and joins them into a single error
func Join(errs ...error) error {
if len(errs) == 1 {
return errs[0]
}
var msgs []string
for _, err := range errs {
if err == nil {
continue
}
msgs = append(msgs, err.Error())
}
return errors.New("[" + strings.Join(msgs, ", ") + "]")
}

View File

@@ -1,58 +0,0 @@
version: '3'
services:
storage:
image: mcr.microsoft.com/azure-storage/azurite
volumes:
- azurite_backup_data:/data
command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data
healthcheck:
test: nc 127.0.0.1 10000 -z
interval: 1s
retries: 30
az_cli:
image: mcr.microsoft.com/azure-cli
volumes:
- ./local:/dump
command:
- /bin/sh
- -c
- |
az storage container create --name test-container
depends_on:
storage:
condition: service_healthy
environment:
AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://storage:10000/devstoreaccount1;
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
restart: always
environment:
AZURE_STORAGE_ACCOUNT_NAME: devstoreaccount1
AZURE_STORAGE_PRIMARY_ACCOUNT_KEY: Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
AZURE_STORAGE_CONTAINER_NAME: test-container
AZURE_STORAGE_ENDPOINT: http://storage:10000/{{ .AccountName }}/
AZURE_STORAGE_PATH: 'path/to/backup'
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
azurite_backup_data:
name: azurite_backup_data
app_data:

View File

@@ -1,40 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker-compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker-compose exec backup backup
sleep 5
expect_running_containers "3"
docker-compose run --rm az_cli \
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
pass "Found relevant files in untared remote backups."
# The second part of this test checks that backups do not get deleted when the
# retention is set to 0 days (doing so would mean deleting all backups)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5
docker-compose exec backup backup
docker-compose run --rm az_cli \
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
test -f ./local/test.tar.gz
pass "Remote backups have not been deleted."
docker-compose down --volumes

View File

@@ -1,48 +0,0 @@
version: '3'
services:
minio:
hostname: minio.local
image: minio/minio:RELEASE.2020-08-04T23-10-51Z
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: test
MINIO_ACCESS_KEY: test
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server --certs-dir "/certs" --address ":443" /data'
volumes:
- minio_backup_data:/data
- ./minio.crt:/certs/public.crt
- ./minio.key:/certs/private.key
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
depends_on:
- minio
restart: always
environment:
BACKUP_FILENAME: test.tar.gz
AWS_ACCESS_KEY_ID: test
AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
AWS_ENDPOINT: minio.local:443
AWS_ENDPOINT_CA_CERT: /root/minio-rootCA.crt
AWS_S3_BUCKET_NAME: backup
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ./rootCA.crt:/root/minio-rootCA.crt
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
minio_backup_data:
name: minio_backup_data
app_data:

View File

@@ -1,43 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
openssl genrsa -des3 -passout pass:test -out rootCA.key 4096
openssl req -passin pass:test \
-subj "/C=DE/ST=BE/O=IntegrationTest, Inc." \
-x509 -new -key rootCA.key -sha256 -days 1 -out rootCA.crt
openssl genrsa -out minio.key 4096
openssl req -new -sha256 -key minio.key \
-subj "/C=DE/ST=BE/O=IntegrationTest, Inc./CN=minio" \
-out minio.csr
openssl x509 -req -passin pass:test \
-in minio.csr \
-CA rootCA.crt -CAkey rootCA.key -CAcreateserial \
-extfile san.cnf \
-out minio.crt -days 1 -sha256
openssl x509 -in minio.crt -noout -text
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm -it \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'tar -xvf /minio_data/backup/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backups."
docker compose down --volumes

View File

@@ -1 +0,0 @@
subjectAltName = DNS:minio.local

View File

@@ -3,15 +3,10 @@
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker network create test_network
docker volume create backup_data
docker volume create app_data
# This volume is created to test whether empty directories are handled
# correctly. It is not supposed to hold any data.
docker volume create empty_data
docker run -d \
--name minio \
@@ -36,7 +31,6 @@ sleep 10
docker run --rm \
--network test_network \
-v app_data:/backup/app_data \
-v empty_data:/backup/empty_data \
-v /var/run/docker.sock:/var/run/docker.sock \
--env AWS_ACCESS_KEY_ID=test \
--env AWS_SECRET_ACCESS_KEY=GMusLtUmILge2by+z890kQ \
@@ -46,17 +40,21 @@ docker run --rm \
--env BACKUP_FILENAME=test.tar.gz \
--env "BACKUP_FROM_SNAPSHOT=true" \
--entrypoint backup \
offen/docker-volume-backup:${TEST_VERSION:-canary}
offen/docker-volume-backup:$TEST_VERSION
docker run --rm -it \
-v backup_data:/data alpine \
ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'
ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
pass "Found relevant files in untared remote backup."
echo "[TEST:PASS] Found relevant files in untared backup."
# This test does not stop containers during backup. This is happening on
# purpose in order to cover this setup as well.
expect_running_containers "2"
if [ "$(docker ps -q | wc -l)" != "2" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker ps
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
docker rm $(docker stop minio offen)
docker volume rm backup_data app_data

View File

@@ -1,51 +0,0 @@
version: '3.8'
services:
database:
image: mariadb:10.7
deploy:
restart_policy:
condition: on-failure
environment:
MARIADB_ROOT_PASSWORD: test
MARIADB_DATABASE: backup
labels:
# this is testing the deprecated label on purpose
- docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump -ptest --all-databases > /tmp/volume/dump.sql'
- docker-volume-backup.copy-post=/bin/sh -c 'echo "post" > /tmp/volume/post.txt'
- docker-volume-backup.exec-label=test
volumes:
- app_data:/tmp/volume
other_database:
image: mariadb:10.7
deploy:
restart_policy:
condition: on-failure
environment:
MARIADB_ROOT_PASSWORD: test
MARIADB_DATABASE: backup
labels:
- docker-volume-backup.archive-pre=touch /tmp/volume/not-relevant.txt
- docker-volume-backup.exec-label=not-relevant
volumes:
- app_data:/tmp/volume
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
deploy:
restart_policy:
condition: on-failure
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
EXEC_LABEL: test
EXEC_FORWARD_OUTPUT: "true"
volumes:
- archive:/archive
- app_data:/backup/data:ro
- /var/run/docker.sock:/var/run/docker.sock
volumes:
app_data:
archive:

View File

@@ -1,64 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 30 # mariadb likes to take a bit before responding
docker compose exec backup backup
sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' commands_archive) ./local
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/dump.sql ]; then
fail "Could not find file written by pre command."
fi
pass "Found expected file."
if [ -f ./backup/data/not-relevant.txt ]; then
fail "Command ran for container with other label."
fi
pass "Command did not run for container with other label."
if [ -f ./backup/data/post.txt ]; then
fail "File created in post command was present in backup."
fi
pass "Did not find unexpected file."
docker compose down --volumes
sudo rm -rf ./local
info "Running commands test in swarm mode next."
docker swarm init
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z $(docker ps -q -f name=backup) ]; do
info "Backup container not ready yet. Retrying."
sleep 1
done
sleep 20
docker exec $(docker ps -q -f name=backup) backup
sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' test_stack_archive) ./local
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/dump.sql ]; then
fail "Could not find file written by pre command."
fi
pass "Found expected file."
if [ -f ./backup/data/post.txt ]; then
fail "File created in post command was present in backup."
fi
pass "Did not find unexpected file."
docker stack rm test_stack
docker swarm leave --force

View File

@@ -10,11 +10,10 @@ services:
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
volumes:
- minio_backup_data:/data
- backup_data:/data
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
backup: &default_backup_service
image: offen/docker-volume-backup:${TEST_VERSION}
depends_on:
- minio
restart: always
@@ -24,13 +23,17 @@ services:
AWS_ENDPOINT: minio:9000
AWS_ENDPOINT_PROTO: http
AWS_S3_BUCKET_NAME: backup
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_FILENAME: test.tar.gz
BACKUP_LATEST_SYMLINK: test.latest.tar.gz.gpg
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
GPG_PASSPHRASE: 1234secret
BACKUP_UID: ${BACKUP_UID:-1000}
BACKUP_GID: ${BACKUP_GID:-1000}
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
@@ -42,6 +45,5 @@ services:
- app_data:/var/opt/offen
volumes:
minio_backup_data:
name: minio_backup_data
backup_data:
app_data:

65
test/compose/run.sh Executable file
View File

@@ -0,0 +1,65 @@
#!/bin/sh
set -e
cd $(dirname $0)
mkdir -p local
BACKUP_UID=$(id -u) BACKUP_GID=$(id -g) docker-compose up -d
sleep 5
BACKUP_UID=$(id -u) BACKUP_GID=$(id -g) docker-compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
BACKUP_UID=$(id -u) BACKUP_GID=$(id -g) docker-compose exec backup backup
docker run --rm -it \
-v compose_backup_data:/data alpine \
ash -c 'apk add gnupg && echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /data/backup/test.tar.gz.gpg > /tmp/test.tar.gz && tar -xf /tmp/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
echo "[TEST:PASS] Found relevant files in untared remote backup."
test -L ./local/test.latest.tar.gz.gpg
owner=$(stat -c '%U:%G' ./local/test.tar.gz.gpg)
if [ "$owner" != "$(id -un):$(id -gn)" ]; then
echo "[TEST:FAIL] Expected backup file to have correct owners, expected "$(id -un):$(id -gn)", got $owner"
exit 1
fi
echo 1234secret | gpg -d --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
rm ./local/decrypted.tar.gz
test -L /tmp/backup/app_data/db.link
echo "[TEST:PASS] Found relevant files in untared local backup."
if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker-compose ps
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
# The second part of this test checks that backups do not get deleted when the
# retention is set to 0 days (doing so would mean deleting all backups)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5
docker-compose exec backup backup
docker run --rm -it \
-v compose_backup_data:/data alpine \
ash -c '[ $(find /data/backup/ -type f | wc -l) = "1" ]'
echo "[TEST:PASS] Remote backups have not been deleted."
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
echo "[TEST:FAIL] Backups should not have been deleted, instead seen:"
find ./local -type f
fi
echo "[TEST:PASS] Local backups have not been deleted."
docker-compose down --volumes

View File

@@ -1 +0,0 @@
local

View File

@@ -1,2 +0,0 @@
BACKUP_FILENAME="conf.tar.gz"
BACKUP_CRON_EXPRESSION="*/1 * * * *"

View File

@@ -1,2 +0,0 @@
BACKUP_FILENAME="other.tar.gz"
BACKUP_CRON_EXPRESSION="*/1 * * * *"

View File

@@ -1,2 +0,0 @@
BACKUP_FILENAME="never.tar.gz"
BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?"

View File

@@ -1,23 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- ./01backup.env:/etc/dockervolumebackup/conf.d/01backup.env
- ./02backup.env:/etc/dockervolumebackup/conf.d/02backup.env
- ./03never.env:/etc/dockervolumebackup/conf.d/03never.env
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:

View File

@@ -1,31 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
# sleep until a backup is guaranteed to have happened on the 1 minute schedule
sleep 100
docker compose down --volumes
if [ ! -f ./local/conf.tar.gz ]; then
fail "Config from file was not used."
fi
pass "Config from file was used."
if [ ! -f ./local/other.tar.gz ]; then
fail "Run on same schedule did not succeed."
fi
pass "Run on same schedule succeeded."
if [ -f ./local/never.tar.gz ]; then
fail "Unexpected file was found."
fi
pass "Unexpected cron did not run."

View File

@@ -1,4 +0,0 @@
ARG version=canary
FROM offen/docker-volume-backup:$version
RUN apk add rsync

View File

@@ -1,26 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
labels:
- docker-volume-backup.copy-post=/bin/sh -c 'mkdir -p /tmp/unpack && tar -xvf $$COMMAND_RUNTIME_ARCHIVE_FILEPATH -C /tmp/unpack && rsync -r /tmp/unpack/backup/app_data /local'
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
EXEC_FORWARD_OUTPUT: "true"
volumes:
- ./local:/local
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:

View File

@@ -1,28 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
export TEST_VERSION="${TEST_VERSION:-canary}-with-rsync"
docker build . -t offen/docker-volume-backup:$TEST_VERSION
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "2"
if [ ! -f "./local/app_data/offen.db" ]; then
fail "Could not find expected file in untared archive."
fi
docker compose down --volumes

1
test/gpg/.gitignore vendored
View File

@@ -1 +0,0 @@
local

View File

@@ -1,26 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
environment:
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_FILENAME: test.tar.gz
BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
GPG_PASSPHRASE: 1234#$$ecret
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:

View File

@@ -1,33 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
docker compose exec backup backup
expect_running_containers "2"
tmp_dir=$(mktemp -d)
echo "1234#\$ecret" | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C $tmp_dir
if [ ! -f $tmp_dir/backup/app_data/offen.db ]; then
fail "Could not find expected file in untared archive."
fi
rm ./local/decrypted.tar.gz
pass "Found relevant files in decrypted and untared local backup."
if [ ! -L ./local/test-latest.tar.gz.gpg ]; then
fail "Could not find local symlink to latest encrypted backup."
fi
docker compose down --volumes

View File

@@ -1 +0,0 @@
local

View File

@@ -1,15 +0,0 @@
version: '3.8'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
deploy:
restart_policy:
condition: on-failure
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_EXCLUDE_REGEXP: '\.(me|you)$$'
volumes:
- ./local:/archive
- ./sources:/backup/data:ro

View File

@@ -1,28 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
docker compose exec backup backup
docker compose down --volumes
out=$(mktemp -d)
sudo tar --same-owner -xvf ./local/test.tar.gz -C "$out"
if [ ! -f "$out/backup/data/me.txt" ]; then
fail "Expected file was not found."
fi
pass "Expected file was found."
if [ -f "$out/backup/data/skip.me" ]; then
fail "Ignored file was found."
fi
pass "Ignored file was not found."

View File

@@ -1 +0,0 @@
local

View File

@@ -1,29 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
restart: always
environment:
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ./local:/archive
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:

View File

@@ -1,55 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
docker compose exec backup backup
sleep 5
expect_running_containers "2"
tmp_dir=$(mktemp -d)
tar -xvf ./local/test-hostnametoken.tar.gz -C $tmp_dir
if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
fail "Could not find expected file in untared archive."
fi
rm -f ./local/test-hostnametoken.tar.gz
if [ ! -L "$tmp_dir/backup/app_data/db.link" ]; then
fail "Could not find expected symlink in untared archive."
fi
pass "Found relevant files in decrypted and untared local backup."
if [ ! -L ./local/test-hostnametoken.latest.tar.gz.gpg ]; then
fail "Could not find symlink to latest version."
fi
pass "Found symlink to latest version in local backup."
# The second part of this test checks that backups do NOT get deleted when the
# retention is set to 0 days, since that would mean deleting all backups.
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
fail "Backups should not have been deleted, instead seen: "$(find ./local -type f)""
fi
pass "Local backups have not been deleted."
docker compose down --volumes

View File

@@ -1 +0,0 @@
local

View File

@@ -1,37 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_PRUNING_PREFIX: test
NOTIFICATION_LEVEL: info
NOTIFICATION_URLS: ${NOTIFICATION_URLS}
EXTRA_VALUE: extra-value
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- ./notifications.tmpl:/etc/dockervolumebackup/notifications.d/notifications.tmpl
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
gotify:
image: gotify/server
ports:
- 8080:80
environment:
- GOTIFY_DEFAULTUSER_PASS=custom
volumes:
- gotify_data:/app/data
volumes:
app_data:
gotify_data:

View File

@@ -1,7 +0,0 @@
{{ define "title_success" -}}
Successful test run with {{ env "EXTRA_VALUE" }}, yay!
{{- end }}
{{ define "body_success" -}}
Backing up {{ .Stats.BackupFile.FullPath }} succeeded.
{{- end }}

View File

@@ -1,50 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
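# Register a Gotify application via its REST API; the returned token is used to configure notifications below.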
GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')
info "Set up Gotify application using token $GOTIFY_TOKEN"
docker compose exec backup backup
NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 0 ]; then
fail "Expected no notifications to be sent when not configured"
fi
pass "No notifications were sent when not configured."
docker compose down
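# Bring the stack back up with a Gotify notification URL configured so this run does send a message.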
NOTIFICATION_URLS="gotify://gotify/${GOTIFY_TOKEN}?disableTLS=true" docker compose up -d
docker compose exec backup backup
NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 1 ]; then
fail "Expected one notifications to be sent when configured"
fi
pass "Correct number of notifications were sent when configured."
MESSAGE_TITLE=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].title')
MESSAGE_BODY=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].message')
if [ "$MESSAGE_TITLE" != "Successful test run with extra-value, yay!" ]; then
fail "Unexpected notification title $MESSAGE_TITLE"
fi
pass "Custom notification title was used."
if [ "$MESSAGE_BODY" != "Backing up /tmp/test.tar.gz succeeded." ]; then
fail "Unexpected notification body $MESSAGE_BODY"
fi
pass "Custom notification body was used."
docker compose down --volumes

View File

@@ -1 +0,0 @@
local

View File

@@ -1,27 +0,0 @@
version: '3'
services:
db:
image: postgres:14-alpine
restart: unless-stopped
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- postgres_data:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=1FHJMSwt0yhIN1zS7I4DilGUhThBKq0x
- POSTGRES_USER=test
- POSTGRES_DB=test
backup:
image: offen/docker-volume-backup:${TEST_VERSION}
restart: always
environment:
BACKUP_FILENAME: backup.tar.gz
volumes:
- postgres_data:/backup/postgres:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- ./local:/archive
volumes:
postgres_data:

View File

@@ -1,30 +0,0 @@
#!/bin/sh
# This test refers to https://github.com/offen/docker-volume-backup/issues/71
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
docker compose exec backup backup
tmp_dir=$(mktemp -d)
sudo tar --same-owner -xvf ./local/backup.tar.gz -C $tmp_dir
sudo find $tmp_dir/backup/postgres > /dev/null
pass "Backup contains files at expected location"
for file in $(sudo find $tmp_dir/backup/postgres); do
if [ "$(sudo stat -c '%u:%g' $file)" != "70:70" ]; then
fail "Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
fi
done
pass "All files and directories in backup preserved their ownership."
docker compose down --volumes

View File

@@ -1,42 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm -it \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'tar -xvf /minio_data/backup/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backups."
# The second part of this test checks that backups do NOT get deleted when the
# retention is set to 0 days, since that would mean deleting all backups.
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm -it \
-v minio_backup_data:/minio_data \
alpine \
ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes

View File

@@ -1,78 +0,0 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense
version: '3.8'
services:
minio:
image: minio/minio:RELEASE.2020-08-04T23-10-51Z
deploy:
restart_policy:
condition: on-failure
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: test
MINIO_ACCESS_KEY: test
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
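# Pre-create the directory backing the "backup" bucket before starting the server.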
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
volumes:
- backup_data:/data
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
depends_on:
- minio
deploy:
restart_policy:
condition: on-failure
environment:
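# The _FILE variants read credentials from the Docker secrets mounted below instead of plain values.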
AWS_ACCESS_KEY_ID_FILE: /run/secrets/minio_root_user
AWS_SECRET_ACCESS_KEY_FILE: /run/secrets/minio_root_password
AWS_ENDPOINT: minio:9000
AWS_ENDPOINT_PROTO: http
AWS_S3_BUCKET_NAME: backup
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: 7
BACKUP_PRUNING_LEEWAY: 5s
volumes:
- pg_data:/backup/pg_data:ro
- /var/run/docker.sock:/var/run/docker.sock
secrets:
- minio_root_user
- minio_root_password
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
healthcheck:
disable: true
deploy:
replicas: 2
restart_policy:
condition: on-failure
pg:
image: postgres:14-alpine
environment:
POSTGRES_PASSWORD: example
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- pg_data:/var/lib/postgresql/data
deploy:
restart_policy:
condition: on-failure
volumes:
backup_data:
name: backup_data
pg_data:
name: pg_data
secrets:
minio_root_user:
external: true
minio_root_password:
external: true

View File

@@ -1,44 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker swarm init
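# The compose file declares these secrets as external, so they must exist before the stack is deployed.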
printf "test" | docker secret create minio_root_user -
printf "GMusLtUmILge2by+z890kQ" | docker secret create minio_root_password -
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z "$(docker ps -q -f name=backup)" ]; do
info "Backup container not ready yet. Retrying."
sleep 1
done
sleep 20
docker exec $(docker ps -q -f name=backup) backup
docker run --rm -it \
-v backup_data:/data alpine \
ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
pass "Found relevant files in untared backup."
sleep 5
expect_running_containers "5"
docker stack rm test_stack
docker secret rm minio_root_password
docker secret rm minio_root_user
docker swarm leave --force
sleep 10
docker volume rm backup_data
docker volume rm pg_data

View File

@@ -1,47 +0,0 @@
version: '3'
services:
ssh:
image: linuxserver/openssh-server:version-8.6_p1-r3
environment:
- PUID=1000
- PGID=1000
- USER_NAME=test
volumes:
- ./id_rsa.pub:/config/.ssh/authorized_keys
- ssh_backup_data:/tmp
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
depends_on:
- ssh
restart: always
environment:
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
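# Connection settings for the ssh service above; SSH_IDENTITY_PASSPHRASE must match the passphrase used when generating id_rsa.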
SSH_HOST_NAME: ssh
SSH_PORT: 2222
SSH_USER: test
SSH_REMOTE_PATH: /tmp
SSH_IDENTITY_PASSPHRASE: test1234
volumes:
- ./id_rsa:/root/.ssh/id_rsa
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
ssh_backup_data:
name: ssh_backup_data
app_data:

View File

@@ -1,43 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
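# Generate a passphrase-protected PEM key pair: the public key is mounted into the SSH server, the private key into the backup container.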
ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers 3
docker run --rm -it \
-v ssh_backup_data:/ssh_data \
alpine \
ash -c 'tar -xvf /ssh_data/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in decrypted and untared remote backups."
# The second part of this test checks that backups do NOT get deleted when the
# retention is set to 0 days, since that would mean deleting all backups.
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm -it \
-v ssh_backup_data:/ssh_data \
alpine \
ash -c '[ $(find /ssh_data/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes
rm -f id_rsa id_rsa.pub

View File

@@ -18,8 +18,8 @@ services:
volumes:
- backup_data:/data
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
backup: &default_backup_service
image: offen/docker-volume-backup:${TEST_VERSION}
depends_on:
- minio
deploy:
@@ -43,15 +43,13 @@ services:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
healthcheck:
disable: true
deploy:
replicas: 2
restart_policy:
condition: on-failure
pg:
image: postgres:14-alpine
image: postgres:12.2-alpine
environment:
POSTGRES_PASSWORD: example
labels:
@@ -64,6 +62,4 @@ services:
volumes:
backup_data:
name: backup_data
pg_data:
name: pg_data

View File

@@ -3,15 +3,13 @@
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker swarm init
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z "$(docker ps -q -f name=backup)" ]; do
info "Backup container not ready yet. Retrying."
echo "[TEST:INFO] Backup container not ready yet. Retrying."
sleep 1
done
@@ -20,18 +18,19 @@ sleep 20
docker exec $(docker ps -q -f name=backup) backup
docker run --rm -it \
-v backup_data:/data alpine \
-v test_stack_backup_data:/data alpine \
ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
pass "Found relevant files in untared backup."
echo "[TEST:PASS] Found relevant files in untared backup."
sleep 5
expect_running_containers "5"
if [ "$(docker ps -q | wc -l)" != "5" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker ps -a
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
docker stack rm test_stack
docker swarm leave --force
sleep 10
docker volume rm backup_data
docker volume rm pg_data

View File

@@ -1,23 +0,0 @@
#!/bin/sh
set -e
info () {
echo "[test:${current_test:-none}:info] "$1""
}
pass () {
echo "[test:${current_test:-none}:pass] "$1""
}
fail () {
echo "[test:${current_test:-none}:fail] "$1""
exit 1
}
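# Asserts that exactly $1 containers are currently running and fails the test otherwise.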
expect_running_containers () {
if [ "$(docker ps -q | wc -l)" != "$1" ]; then
fail "Expected $1 containers to be running, instead seen: "$(docker ps -a | wc -l)""
fi
pass "$1 containers running."
}

View File

@@ -1,45 +0,0 @@
version: '3'
services:
webdav:
image: bytemark/webdav:2.4
environment:
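# Digest auth credentials for the WebDAV server; the backup service below authenticates with the same pair.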
AUTH_TYPE: Digest
USERNAME: test
PASSWORD: test
volumes:
- webdav_backup_data:/var/lib/dav
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
depends_on:
- webdav
restart: always
environment:
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
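# Upload to the webdav service over plain HTTP (hence WEBDAV_URL_INSECURE), storing archives under WEBDAV_PATH.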
WEBDAV_URL: http://webdav/
WEBDAV_URL_INSECURE: 'true'
WEBDAV_PATH: /my/new/path/
WEBDAV_USERNAME: test
WEBDAV_PASSWORD: test
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
webdav_backup_data:
name: webdav_backup_data
app_data:

View File

@@ -1,40 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm -it \
-v webdav_backup_data:/webdav_data \
alpine \
ash -c 'tar -xvf /webdav_data/data/my/new/path/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backup."
# The second part of this test checks that backups do NOT get deleted when the
# retention is set to 0 days, since that would mean deleting all backups.
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm -it \
-v webdav_backup_data:/webdav_data \
alpine \
ash -c '[ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes