Compare commits


13 Commits

Author | SHA1 | Message | Date
Frederik Ring | 053422ba0e | only tag proper releases as latest | 2021-08-22 20:16:24 +02:00
Frederik Ring | 39b933b8e8 | improve logging messages | 2021-08-22 20:03:18 +02:00
Frederik Ring | 8161ad7f8f | add insecure option, update docs | 2021-08-22 19:26:34 +02:00
Frederik Ring | 34e01fa303 | adapt repo layout to go | 2021-08-22 18:16:40 +02:00
Frederik Ring | 2554c538ea | add logging | 2021-08-22 18:00:10 +02:00
Frederik Ring | 77f948d4da | refactor deferred cleanup actions to always run | 2021-08-22 16:19:41 +02:00
Frederik Ring | d388785222 | implement pruning from remote storage | 2021-08-22 15:17:23 +02:00
Frederik Ring | b46c402b19 | add gpg encryption | 2021-08-22 14:44:33 +02:00
Frederik Ring | 29f8a078bc | implement deletion of local backups | 2021-08-22 14:19:25 +02:00
Frederik Ring | 3f7c08d616 | implement lock file to ensure backup runs mutually exclusive | 2021-08-22 12:02:22 +02:00
Frederik Ring | 8b7d27740a | implement copy to remote storage | 2021-08-21 21:38:26 +02:00
Frederik Ring | 9ddddc139a | scaffold script flow | 2021-08-21 19:55:22 +02:00
Frederik Ring | d34b525804 | try porting docker related parts to golang | 2021-08-21 19:05:49 +02:00
84 changed files with 1363 additions and 5942 deletions

.circleci/config.yml (new file, 66 lines)

@@ -0,0 +1,66 @@
version: 2.1
jobs:
canary:
machine:
image: ubuntu-1604:202007-01
working_directory: ~/docker-volume-backup
steps:
- checkout
- run:
name: Build
command: |
docker build . -t offen/docker-volume-backup:canary
- run:
name: Run tests
working_directory: ~/docker-volume-backup/test
command: |
./test.sh canary
build:
docker:
- image: cimg/base:2020.06
environment:
DOCKER_BUILDKIT: '1'
DOCKER_CLI_EXPERIMENTAL: enabled
working_directory: ~/docker-volume-backup
steps:
- checkout
- setup_remote_docker:
version: 20.10.6
- docker/install-docker-credential-helper
- docker/configure-docker-credentials-store
- run:
name: Push to Docker Hub
command: |
echo "$DOCKER_ACCESSTOKEN" | docker login --username offen --password-stdin
# This is required for building ARM: https://gitlab.alpinelinux.org/alpine/aports/-/issues/12406
docker run --rm --privileged linuxkit/binfmt:v0.8
docker context create docker-volume-backup
docker buildx create docker-volume-backup --name docker-volume-backup --use
docker buildx inspect --bootstrap
tag_args="-t offen/docker-volume-backup:$CIRCLE_TAG"
if [[ "$CIRCLE_TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
# prerelease tags like `v2.0.0-alpha.1` should not be released as `latest`
tag_args="$tag_args -t offen/docker-volume-backup:latest"
fi
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
$tag_args . --push
workflows:
version: 2
docker_image:
jobs:
- canary:
filters:
tags:
ignore: /^v.*/
- build:
filters:
branches:
ignore: /.*/
tags:
only: /^v.*/
orbs:
docker: circleci/docker@1.0.1
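
The `Push to Docker Hub` step above only adds the `latest` tag when the pushed git tag is a plain semver release. A minimal Go sketch of that gate, shown purely for illustration (the regular expression is copied from the job; the sample tags are made up):

package main

import (
    "fmt"
    "regexp"
)

// properRelease mirrors the check in the CircleCI job: only vMAJOR.MINOR.PATCH
// tags without a prerelease suffix should also be tagged `latest`.
var properRelease = regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)

func main() {
    for _, tag := range []string{"v2.0.0", "v2.0.0-alpha.1"} { // hypothetical tags
        fmt.Printf("%s -> also tag as latest: %t\n", tag, properRelease.MatchString(tag))
    }
}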


@@ -1,7 +1 @@
 test
-.github
-.circleci
-docs
-.editorconfig
-LICENSE
-README.md

.github/FUNDING.yml (vendored, 3 lines removed)

@@ -1,3 +0,0 @@
github: offen
patreon: offen


@@ -1,34 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
<!--
A clear and concise description of what the bug is.
-->
**To Reproduce**
Steps to reproduce the behavior:
1. ...
2. ...
3. ...
**Expected behavior**
<!--
A clear and concise description of what you expected to happen.
-->
**Version (please complete the following information):**
- Image Version: <!-- e.g. v2.21.0 -->
- Docker Version: <!-- e.g. 20.10.17 -->
- Docker Compose Version (if applicable): <!-- e.g. 1.29.2 -->
**Additional context**
<!--
Add any other context about the problem here.
-->


@@ -1,28 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
<!--
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-->
**Describe the solution you'd like**
<!--
A clear and concise description of what you want to happen.
-->
**Describe alternatives you've considered**
<!--
A clear and concise description of any alternative solutions or features you've considered.
-->
**Additional context**
<!--
Add any other context or screenshots about the feature request here.
-->


@@ -1,28 +0,0 @@
---
name: Support request
about: Ask for help
title: ''
labels: ''
assignees: ''
---
**What are you trying to do?**
<!--
A clear and concise description of what you are trying to do, but cannot get working.
-->
**What is your current configuration?**
<!--
Add the full configuration you are using. Please redact out any real-world credentials.
-->
**Log output**
<!--
Provide the full log output of your setup.
-->
**Additional context**
<!--
Add any other context or screenshots about the support request here.
-->


@@ -1,10 +0,0 @@
version: 2
updates:
- package-ecosystem: docker
directory: /
schedule:
interval: weekly
- package-ecosystem: gomod
directory: /
schedule:
interval: weekly


@@ -1,59 +0,0 @@
name: Release Docker Image
on:
push:
tags: v**
jobs:
push_to_registries:
name: Push Docker image to multiple registries
runs-on: ubuntu-latest
permissions:
packages: write
contents: read
steps:
- name: Check out the repo
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Log in to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Log in to GHCR
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract Docker tags
id: meta
run: |
version_tag="${{github.ref_name}}"
tags=($version_tag)
if [[ "$version_tag" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
# prerelease tags like `v2.0.0-alpha.1` should not be released as `latest` nor `v2`
tags+=("latest")
tags+=($(echo "$version_tag" | cut -d. -f1))
fi
releases=""
for tag in "${tags[@]}"; do
releases="${releases:+$releases,}offen/docker-volume-backup:$tag,ghcr.io/offen/docker-volume-backup:$tag"
done
echo "releases=$releases" >> "$GITHUB_OUTPUT"
- name: Build and push Docker images
uses: docker/build-push-action@v4
with:
context: .
push: true
platforms: linux/amd64,linux/arm64,linux/arm/v7
tags: ${{ steps.meta.outputs.releases }}
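
The `Extract Docker tags` step expands a single pushed git tag into the full list of image references for both Docker Hub and GHCR: the tag itself, plus `latest` and the major version (derived with `cut -d. -f1`) for proper releases. A rough Go equivalent of that expansion, shown only to illustrate the intent of the bash above (function names and the sample tag are hypothetical):

package main

import (
    "fmt"
    "strings"
)

// expandTags mirrors the bash step for a proper release: the pushed tag itself,
// plus `latest` and the major version tag. A prerelease tag (see the semver
// check in the workflow) would yield only the first element.
func expandTags(versionTag string) []string {
    return []string{versionTag, "latest", strings.SplitN(versionTag, ".", 2)[0]}
}

// releaseList builds the comma-separated image list consumed by
// docker/build-push-action, targeting both registries.
func releaseList(tags []string) string {
    var refs []string
    for _, tag := range tags {
        refs = append(refs,
            "offen/docker-volume-backup:"+tag,
            "ghcr.io/offen/docker-volume-backup:"+tag,
        )
    }
    return strings.Join(refs, ",")
}

func main() {
    fmt.Println(releaseList(expandTags("v2.30.0"))) // hypothetical tag
}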


@@ -1,30 +0,0 @@
name: Run Integration Tests
on:
push:
branches:
- main
pull_request:
jobs:
test:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build Docker Image
env:
DOCKER_BUILDKIT: '1'
run: docker build . -t offen/docker-volume-backup:test
- name: Run Tests
working-directory: ./test
run: |
# Stop the buildx container so the tests can make assertions
# about the number of running containers
docker rm -f $(docker ps -aq)
export GPG_TTY=$(tty)
./test.sh test


@@ -1,21 +1,22 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0

-FROM golang:1.20-alpine as builder
+FROM golang:1.17-alpine as builder

 WORKDIR /app
-COPY . .
-RUN go mod download
-WORKDIR /app/cmd/backup
-RUN go build -o backup .
+COPY go.mod go.sum ./
+COPY cmd/backup/main.go ./cmd/backup/main.go
+RUN go build -o backup cmd/backup/main.go

-FROM alpine:3.18
+FROM alpine:3.14

 WORKDIR /root
-RUN apk add --no-cache ca-certificates busybox-extras
+RUN apk add --update ca-certificates
-COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
+COPY --from=builder /app/backup /usr/bin/backup
-COPY --chmod=755 ./entrypoint.sh /root/
+COPY ./entrypoint.sh /root/
+RUN chmod +x entrypoint.sh

 ENTRYPOINT ["/root/entrypoint.sh"]

NOTICE (new file, 22 lines)

@@ -0,0 +1,22 @@
Copyright 2021 Offen Authors <hioffen@posteo.de>
This project contains software that is Copyright (c) 2018 Futurice
Licensed under the Terms of the MIT License:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

README.md (1414 lines changed)

File diff suppressed because it is too large


@@ -1,133 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
// Portions of this file are taken from package `targz`, Copyright (c) 2014 Fredrik Wallgren
// Licensed under the MIT License: https://github.com/walle/targz/blob/57fe4206da5abf7dd3901b4af3891ec2f08c7b08/LICENSE
package main
import (
"archive/tar"
"compress/gzip"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
)
func createArchive(files []string, inputFilePath, outputFilePath string) error {
inputFilePath = stripTrailingSlashes(inputFilePath)
inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
if err != nil {
return fmt.Errorf("createArchive: error transposing given file paths: %w", err)
}
if err := os.MkdirAll(filepath.Dir(outputFilePath), 0755); err != nil {
return fmt.Errorf("createArchive: error creating output file path: %w", err)
}
if err := compress(files, outputFilePath, filepath.Dir(inputFilePath)); err != nil {
return fmt.Errorf("createArchive: error creating archive: %w", err)
}
return nil
}
func stripTrailingSlashes(path string) string {
if len(path) > 0 && path[len(path)-1] == '/' {
path = path[0 : len(path)-1]
}
return path
}
func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error) {
inputFilePath, err := filepath.Abs(inputFilePath)
if err == nil {
outputFilePath, err = filepath.Abs(outputFilePath)
}
return inputFilePath, outputFilePath, err
}
func compress(paths []string, outFilePath, subPath string) error {
file, err := os.Create(outFilePath)
if err != nil {
return fmt.Errorf("compress: error creating out file: %w", err)
}
prefix := path.Dir(outFilePath)
gzipWriter := gzip.NewWriter(file)
tarWriter := tar.NewWriter(gzipWriter)
for _, p := range paths {
if err := writeTarGz(p, tarWriter, prefix); err != nil {
return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
}
}
err = tarWriter.Close()
if err != nil {
return fmt.Errorf("compress: error closing tar writer: %w", err)
}
err = gzipWriter.Close()
if err != nil {
return fmt.Errorf("compress: error closing gzip writer: %w", err)
}
err = file.Close()
if err != nil {
return fmt.Errorf("compress: error closing file: %w", err)
}
return nil
}
func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
fileInfo, err := os.Lstat(path)
if err != nil {
return fmt.Errorf("writeTarGz: error getting file infor for %s: %w", path, err)
}
if fileInfo.Mode()&os.ModeSocket == os.ModeSocket {
return nil
}
var link string
if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
var err error
if link, err = os.Readlink(path); err != nil {
return fmt.Errorf("writeTarGz: error resolving symlink %s: %w", path, err)
}
}
header, err := tar.FileInfoHeader(fileInfo, link)
if err != nil {
return fmt.Errorf("writeTarGz: error getting file info header: %w", err)
}
header.Name = strings.TrimPrefix(path, prefix)
err = tarWriter.WriteHeader(header)
if err != nil {
return fmt.Errorf("writeTarGz: error writing file info header: %w", err)
}
if !fileInfo.Mode().IsRegular() {
return nil
}
file, err := os.Open(path)
if err != nil {
return fmt.Errorf("writeTarGz: error opening %s: %w", path, err)
}
defer file.Close()
_, err = io.Copy(tarWriter, file)
if err != nil {
return fmt.Errorf("writeTarGz: error copying %s to tar writer: %w", path, err)
}
return nil
}


@@ -1,120 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"os"
"regexp"
"time"
)
// Config holds all configuration values that are expected to be set
// by users.
type Config struct {
AwsS3BucketName string `split_words:"true"`
AwsS3Path string `split_words:"true"`
AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"`
AwsEndpointProto string `split_words:"true" default:"https"`
AwsEndpointInsecure bool `split_words:"true"`
AwsEndpointCACert CertDecoder `envconfig:"AWS_ENDPOINT_CA_CERT"`
AwsStorageClass string `split_words:"true"`
AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
AwsAccessKeyIDFile string `envconfig:"AWS_ACCESS_KEY_ID_FILE"`
AwsSecretAccessKey string `split_words:"true"`
AwsSecretAccessKeyFile string `split_words:"true"`
AwsIamRoleEndpoint string `split_words:"true"`
AwsPartSize int64 `split_words:"true"`
BackupSources string `split_words:"true" default:"/backup"`
BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
BackupFilenameExpand bool `split_words:"true"`
BackupLatestSymlink string `split_words:"true"`
BackupArchive string `split_words:"true" default:"/archive"`
BackupRetentionDays int32 `split_words:"true" default:"-1"`
BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
BackupPruningPrefix string `split_words:"true"`
BackupStopContainerLabel string `split_words:"true" default:"true"`
BackupFromSnapshot bool `split_words:"true"`
BackupExcludeRegexp RegexpDecoder `split_words:"true"`
GpgPassphrase string `split_words:"true"`
NotificationURLs []string `envconfig:"NOTIFICATION_URLS"`
NotificationLevel string `split_words:"true" default:"error"`
EmailNotificationRecipient string `split_words:"true"`
EmailNotificationSender string `split_words:"true" default:"noreply@nohost"`
EmailSMTPHost string `envconfig:"EMAIL_SMTP_HOST"`
EmailSMTPPort int `envconfig:"EMAIL_SMTP_PORT" default:"587"`
EmailSMTPUsername string `envconfig:"EMAIL_SMTP_USERNAME"`
EmailSMTPPassword string `envconfig:"EMAIL_SMTP_PASSWORD"`
WebdavUrl string `split_words:"true"`
WebdavUrlInsecure bool `split_words:"true"`
WebdavPath string `split_words:"true" default:"/"`
WebdavUsername string `split_words:"true"`
WebdavPassword string `split_words:"true"`
SSHHostName string `split_words:"true"`
SSHPort string `split_words:"true" default:"22"`
SSHUser string `split_words:"true"`
SSHPassword string `split_words:"true"`
SSHIdentityFile string `split_words:"true" default:"/root/.ssh/id_rsa"`
SSHIdentityPassphrase string `split_words:"true"`
SSHRemotePath string `split_words:"true"`
ExecLabel string `split_words:"true"`
ExecForwardOutput bool `split_words:"true"`
LockTimeout time.Duration `split_words:"true" default:"60m"`
AzureStorageAccountName string `split_words:"true"`
AzureStoragePrimaryAccountKey string `split_words:"true"`
AzureStorageContainerName string `split_words:"true"`
AzureStoragePath string `split_words:"true"`
AzureStorageEndpoint string `split_words:"true" default:"https://{{ .AccountName }}.blob.core.windows.net/"`
}
func (c *Config) resolveSecret(envVar string, secretPath string) (string, error) {
if secretPath == "" {
return envVar, nil
}
data, err := os.ReadFile(secretPath)
if err != nil {
return "", fmt.Errorf("resolveSecret: error reading secret path: %w", err)
}
return string(data), nil
}
type CertDecoder struct {
Cert *x509.Certificate
}
func (c *CertDecoder) Decode(v string) error {
if v == "" {
return nil
}
content, err := ioutil.ReadFile(v)
if err != nil {
content = []byte(v)
}
block, _ := pem.Decode(content)
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return fmt.Errorf("config: error parsing certificate: %w", err)
}
*c = CertDecoder{Cert: cert}
return nil
}
type RegexpDecoder struct {
Re *regexp.Regexp
}
func (r *RegexpDecoder) Decode(v string) error {
if v == "" {
return nil
}
re, err := regexp.Compile(v)
if err != nil {
return fmt.Errorf("config: error compiling given regexp `%s`: %w", v, err)
}
*r = RegexpDecoder{Re: re}
return nil
}
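
Config is decoded by `envconfig` (see `newScript` further down), so `split_words:"true"` maps a field like `BackupPruningLeeway` to the `BACKUP_PRUNING_LEEWAY` variable, explicit `envconfig:` tags pin a name such as `AWS_ACCESS_KEY_ID`, and `default:` supplies fallbacks. A small, self-contained sketch of that mapping using a trimmed-down stand-in struct (the struct and values here are illustrative, not part of the original file):

package main

import (
    "fmt"
    "log"
    "os"
    "time"

    "github.com/kelseyhightower/envconfig"
)

// miniConfig is a hypothetical, trimmed-down stand-in for the Config struct
// above, kept only to show how the struct tags map onto env var names.
type miniConfig struct {
    BackupFilename      string        `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
    BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
    AwsAccessKeyID      string        `envconfig:"AWS_ACCESS_KEY_ID"`
}

func main() {
    os.Setenv("BACKUP_PRUNING_LEEWAY", "30s")
    os.Setenv("AWS_ACCESS_KEY_ID", "example-key")

    var c miniConfig
    if err := envconfig.Process("", &c); err != nil {
        log.Fatal(err)
    }
    // BackupFilename falls back to its default, the other two come from the env
    fmt.Println(c.BackupFilename, c.BackupPruningLeeway, c.AwsAccessKeyID)
}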


@@ -1,204 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
// Portions of this file are taken and adapted from `moby`, Copyright 2012-2017 Docker, Inc.
// Licensed under the Apache 2.0 License: https://github.com/moby/moby/blob/8e610b2b55bfd1bfa9436ab110d311f5e8a74dcb/LICENSE
package main
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/cosiner/argv"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/stdcopy"
"golang.org/x/sync/errgroup"
)
func (s *script) exec(containerRef string, command string, user string) ([]byte, []byte, error) {
args, _ := argv.Argv(command, nil, nil)
commandEnv := []string{
fmt.Sprintf("COMMAND_RUNTIME_ARCHIVE_FILEPATH=%s", s.file),
}
execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, types.ExecConfig{
Cmd: args[0],
AttachStdin: true,
AttachStderr: true,
Env: commandEnv,
User: user,
})
if err != nil {
return nil, nil, fmt.Errorf("exec: error creating container exec: %w", err)
}
resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, types.ExecStartCheck{})
if err != nil {
return nil, nil, fmt.Errorf("exec: error attaching container exec: %w", err)
}
defer resp.Close()
var outBuf, errBuf bytes.Buffer
outputDone := make(chan error)
go func() {
_, err := stdcopy.StdCopy(&outBuf, &errBuf, resp.Reader)
outputDone <- err
}()
select {
case err := <-outputDone:
if err != nil {
return nil, nil, fmt.Errorf("exec: error demultiplexing output: %w", err)
}
break
}
stdout, err := ioutil.ReadAll(&outBuf)
if err != nil {
return nil, nil, fmt.Errorf("exec: error reading stdout: %w", err)
}
stderr, err := ioutil.ReadAll(&errBuf)
if err != nil {
return nil, nil, fmt.Errorf("exec: error reading stderr: %w", err)
}
res, err := s.cli.ContainerExecInspect(context.Background(), execID.ID)
if err != nil {
return nil, nil, fmt.Errorf("exec: error inspecting container exec: %w", err)
}
if res.ExitCode > 0 {
return stdout, stderr, fmt.Errorf("exec: running command exited %d", res.ExitCode)
}
return stdout, stderr, nil
}
func (s *script) runLabeledCommands(label string) error {
f := []filters.KeyValuePair{
{Key: "label", Value: label},
}
if s.c.ExecLabel != "" {
f = append(f, filters.KeyValuePair{
Key: "label",
Value: fmt.Sprintf("docker-volume-backup.exec-label=%s", s.c.ExecLabel),
})
}
containersWithCommand, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Filters: filters.NewArgs(f...),
})
if err != nil {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
var hasDeprecatedContainers bool
if label == "docker-volume-backup.archive-pre" {
f[0] = filters.KeyValuePair{
Key: "label",
Value: "docker-volume-backup.exec-pre",
}
deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Filters: filters.NewArgs(f...),
})
if err != nil {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
if len(deprecatedContainers) != 0 {
hasDeprecatedContainers = true
containersWithCommand = append(containersWithCommand, deprecatedContainers...)
}
}
if label == "docker-volume-backup.archive-post" {
f[0] = filters.KeyValuePair{
Key: "label",
Value: "docker-volume-backup.exec-post",
}
deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Filters: filters.NewArgs(f...),
})
if err != nil {
return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
}
if len(deprecatedContainers) != 0 {
hasDeprecatedContainers = true
containersWithCommand = append(containersWithCommand, deprecatedContainers...)
}
}
if len(containersWithCommand) == 0 {
return nil
}
if hasDeprecatedContainers {
s.logger.Warn(
"Using `docker-volume-backup.exec-pre` and `docker-volume-backup.exec-post` labels has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use other `-pre` and `-post` labels instead. Refer to the README for an upgrade guide.",
)
}
g := new(errgroup.Group)
for _, container := range containersWithCommand {
c := container
g.Go(func() error {
cmd, ok := c.Labels[label]
if !ok && label == "docker-volume-backup.archive-pre" {
cmd, _ = c.Labels["docker-volume-backup.exec-pre"]
} else if !ok && label == "docker-volume-backup.archive-post" {
cmd, _ = c.Labels["docker-volume-backup.exec-post"]
}
userLabelName := fmt.Sprintf("%s.user", label)
user := c.Labels[userLabelName]
s.logger.Infof("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/"))
stdout, stderr, err := s.exec(c.ID, cmd, user)
if s.c.ExecForwardOutput {
os.Stderr.Write(stderr)
os.Stdout.Write(stdout)
}
if err != nil {
return fmt.Errorf("runLabeledCommands: error executing command: %w", err)
}
return nil
})
}
if err := g.Wait(); err != nil {
return fmt.Errorf("runLabeledCommands: error from errgroup: %w", err)
}
return nil
}
type lifecyclePhase string
const (
lifecyclePhaseArchive lifecyclePhase = "archive"
lifecyclePhaseProcess lifecyclePhase = "process"
lifecyclePhaseCopy lifecyclePhase = "copy"
lifecyclePhasePrune lifecyclePhase = "prune"
)
func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func() error {
if s.cli == nil {
return cb
}
return func() error {
if err := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
return fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
}
defer func() {
s.must(s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)))
}()
return cb()
}
}


@@ -1,57 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"errors"
"fmt"
"sort"
)
// hook contains a queued action that can be triggered when the script
// reaches a certain point (e.g. an unsuccessful backup)
type hook struct {
level hookLevel
action func(err error) error
}
type hookLevel int
const (
hookLevelPlumbing hookLevel = iota
hookLevelError
hookLevelInfo
)
var hookLevels = map[string]hookLevel{
"info": hookLevelInfo,
"error": hookLevelError,
}
// registerHook adds the given action at the given level.
func (s *script) registerHook(level hookLevel, action func(err error) error) {
s.hooks = append(s.hooks, hook{level, action})
}
// runHooks runs all hooks that have been registered using the
// given levels in the defined ordering. In case executing a hook returns an
// error, the following hooks will still be run before the function returns.
func (s *script) runHooks(err error) error {
sort.SliceStable(s.hooks, func(i, j int) bool {
return s.hooks[i].level < s.hooks[j].level
})
var actionErrors []error
for _, hook := range s.hooks {
if hook.level > s.hookLevel {
continue
}
if actionErr := hook.action(err); actionErr != nil {
actionErrors = append(actionErrors, fmt.Errorf("runHooks: error running hook: %w", actionErr))
}
}
if len(actionErrors) != 0 {
return errors.Join(actionErrors...)
}
return nil
}
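
runHooks only fires hooks whose level does not exceed the configured notification level, so with NOTIFICATION_LEVEL=error an info-level success hook is skipped while plumbing and error hooks still run. A simplified, self-contained sketch of that filtering (the real method above additionally sorts by level and collects errors):

package main

import "fmt"

type hookLevel int

const (
    hookLevelPlumbing hookLevel = iota
    hookLevelError
    hookLevelInfo
)

type hook struct {
    level  hookLevel
    action func(err error) error
}

// runHooks mimics the filtering above: hooks registered above the configured
// level are skipped, everything else runs even if an earlier hook fails.
func runHooks(hooks []hook, configured hookLevel, err error) {
    for _, h := range hooks {
        if h.level > configured {
            continue
        }
        h.action(err)
    }
}

func main() {
    hooks := []hook{
        {hookLevelInfo, func(error) error { fmt.Println("would notify success"); return nil }},
        {hookLevelError, func(error) error { fmt.Println("would notify failure"); return nil }},
        {hookLevelPlumbing, func(error) error { fmt.Println("cleanup"); return nil }},
    }
    runHooks(hooks, hookLevelError, nil) // NOTIFICATION_LEVEL=error: the info hook is skipped
}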


@@ -1,58 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"errors"
"fmt"
"time"
"github.com/gofrs/flock"
)
// lock opens a lockfile at the given location, keeping it locked until the
// caller invokes the returned release func. In case the lock is currently blocked
// by another execution, it will repeatedly retry until the lock is available
// or the given timeout is exceeded.
func (s *script) lock(lockfile string) (func() error, error) {
start := time.Now()
defer func() {
s.stats.LockedTime = time.Now().Sub(start)
}()
retry := time.NewTicker(5 * time.Second)
defer retry.Stop()
deadline := time.NewTimer(s.c.LockTimeout)
defer deadline.Stop()
fileLock := flock.New(lockfile)
for {
acquired, err := fileLock.TryLock()
if err != nil {
return noop, fmt.Errorf("lock: error trying to lock: %w", err)
}
if acquired {
if s.encounteredLock {
s.logger.Info("Acquired exclusive lock on subsequent attempt, ready to continue.")
}
return fileLock.Unlock, nil
}
if !s.encounteredLock {
s.logger.Infof(
"Exclusive lock was not available on first attempt. Will retry until it becomes available or the timeout of %s is exceeded.",
s.c.LockTimeout,
)
s.encounteredLock = true
}
select {
case <-retry.C:
continue
case <-deadline.C:
return noop, errors.New("lock: timed out waiting for lockfile to become available")
}
}
}


@@ -1,58 +1,492 @@
--- removed version (58 lines):

// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
    "os"
)

func main() {
    s, err := newScript()
    if err != nil {
        panic(err)
    }

    unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
    defer unlock()
    s.must(err)

    defer func() {
        if pArg := recover(); pArg != nil {
            if err, ok := pArg.(error); ok {
                if hookErr := s.runHooks(err); hookErr != nil {
                    s.logger.Errorf("An error occurred calling the registered hooks: %s", hookErr)
                }
                os.Exit(1)
            }
            panic(pArg)
        }
        if err := s.runHooks(nil); err != nil {
            s.logger.Errorf(
                "Backup procedure ran successfully, but an error ocurred calling the registered hooks: %v",
                err,
            )
            os.Exit(1)
        }
        s.logger.Info("Finished running backup tasks.")
    }()

    s.must(s.withLabeledCommands(lifecyclePhaseArchive, func() error {
        restartContainers, err := s.stopContainers()
        // The mechanism for restarting containers is not using hooks as it
        // should happen as soon as possible (i.e. before uploading backups or
        // similar).
        defer func() {
            s.must(restartContainers())
        }()
        if err != nil {
            return err
        }
        return s.createArchive()
    })())

    s.must(s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)())
    s.must(s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)())
    s.must(s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)())
}

+++ added version (492 lines):

// Copyright 2021 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "os/exec"
    "path"
    "path/filepath"
    "strconv"
    "strings"
    "time"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/client"
    "github.com/joho/godotenv"
    minio "github.com/minio/minio-go/v7"
    "github.com/minio/minio-go/v7/pkg/credentials"
    "github.com/sirupsen/logrus"
    "github.com/walle/targz"
    "golang.org/x/crypto/openpgp"
)

func main() {
    unlock := lock("/var/dockervolumebackup.lock")
    defer unlock()

    s := &script{}
    s.must(s.init())
    s.must(s.stopContainersAndRun(s.takeBackup))
    s.must(s.encryptBackup())
    s.must(s.copyBackup())
    s.must(s.cleanBackup())
    s.must(s.pruneOldBackups())
}

type script struct {
    ctx    context.Context
    cli    *client.Client
    mc     *minio.Client
    logger *logrus.Logger

    file       string
    bucket     string
    archive    string
    sources    string
    passphrase string
}

// lock opens a lockfile at the given location, keeping it locked until the
// caller invokes the returned release func. When invoked while the file is
// still locked the function panics.
func lock(lockfile string) func() error {
    lf, err := os.OpenFile(lockfile, os.O_CREATE, os.ModeAppend)
    if err != nil {
        panic(err)
    }
    return func() error {
        if err := lf.Close(); err != nil {
            return fmt.Errorf("lock: error releasing file lock: %w", err)
        }
        if err := os.Remove(lockfile); err != nil {
            return fmt.Errorf("lock: error removing lock file: %w", err)
        }
        return nil
    }
}

// init creates all resources needed for the script to perform actions against
// remote resources like the Docker engine or remote storage locations.
func (s *script) init() error {
    s.ctx = context.Background()
    s.logger = logrus.New()
    s.logger.SetOutput(os.Stdout)

    if err := godotenv.Load("/etc/backup.env"); err != nil {
        return fmt.Errorf("init: failed to load env file: %w", err)
    }

    _, err := os.Stat("/var/run/docker.sock")
    if !os.IsNotExist(err) {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            return fmt.Errorf("init: failed to create docker client")
        }
        s.cli = cli
    }

    if bucket := os.Getenv("AWS_S3_BUCKET_NAME"); bucket != "" {
        s.bucket = bucket
        mc, err := minio.New(os.Getenv("AWS_ENDPOINT"), &minio.Options{
            Creds: credentials.NewStaticV4(
                os.Getenv("AWS_ACCESS_KEY_ID"),
                os.Getenv("AWS_SECRET_ACCESS_KEY"),
                "",
            ),
            Secure: os.Getenv("AWS_ENDPOINT_INSECURE") == "" && os.Getenv("AWS_ENDPOINT_PROTO") == "https",
        })
        if err != nil {
            return fmt.Errorf("init: error setting up minio client: %w", err)
        }
        s.mc = mc
    }

    file := os.Getenv("BACKUP_FILENAME")
    if file == "" {
        return errors.New("init: BACKUP_FILENAME not given")
    }
    s.file = path.Join("/tmp", file)
    s.archive = os.Getenv("BACKUP_ARCHIVE")
    s.sources = os.Getenv("BACKUP_SOURCES")
    s.passphrase = os.Getenv("GPG_PASSPHRASE")

    return nil
}

// stopContainersAndRun stops all Docker containers that are marked as to being
// stopped during the backup and runs the given thunk. After returning, it makes
// sure containers are being restarted if required.
func (s *script) stopContainersAndRun(thunk func() error) error {
    if s.cli == nil {
        return nil
    }
    allContainers, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
        Quiet: true,
    })
    if err != nil {
        return fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
    }

    containerLabel := fmt.Sprintf(
        "docker-volume-backup.stop-during-backup=%s",
        os.Getenv("BACKUP_STOP_CONTAINER_LABEL"),
    )
    containersToStop, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
        Quiet: true,
        Filters: filters.NewArgs(filters.KeyValuePair{
            Key:   "label",
            Value: containerLabel,
        }),
    })
    if err != nil {
        return fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
    }
    s.logger.Infof(
        "Stopping %d containers labeled `%s` out of %d running containers.",
        len(containersToStop),
        containerLabel,
        len(allContainers),
    )

    var stoppedContainers []types.Container
    var errors []error
    if len(containersToStop) != 0 {
        for _, container := range containersToStop {
            if err := s.cli.ContainerStop(s.ctx, container.ID, nil); err != nil {
                errors = append(errors, err)
            } else {
                stoppedContainers = append(stoppedContainers, container)
            }
        }
    }

    defer func() error {
        servicesRequiringUpdate := map[string]struct{}{}

        var restartErrors []error
        for _, container := range stoppedContainers {
            if swarmServiceName, ok := container.Labels["com.docker.swarm.service.name"]; ok {
                servicesRequiringUpdate[swarmServiceName] = struct{}{}
                continue
            }
            if err := s.cli.ContainerStart(s.ctx, container.ID, types.ContainerStartOptions{}); err != nil {
                restartErrors = append(restartErrors, err)
            }
        }

        if len(servicesRequiringUpdate) != 0 {
            services, _ := s.cli.ServiceList(s.ctx, types.ServiceListOptions{})
            for serviceName := range servicesRequiringUpdate {
                var serviceMatch swarm.Service
                for _, service := range services {
                    if service.Spec.Name == serviceName {
                        serviceMatch = service
                        break
                    }
                }
                if serviceMatch.ID == "" {
                    return fmt.Errorf("stopContainersAndRun: Couldn't find service with name %s", serviceName)
                }
                serviceMatch.Spec.TaskTemplate.ForceUpdate = 1
                _, err := s.cli.ServiceUpdate(
                    s.ctx, serviceMatch.ID,
                    serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
                )
                if err != nil {
                    restartErrors = append(restartErrors, err)
                }
            }
        }

        if len(restartErrors) != 0 {
            return fmt.Errorf(
                "stopContainersAndRun: %d error(s) restarting containers and services: %w",
                len(restartErrors),
                err,
            )
        }
        s.logger.Infof("Successfully restarted %d containers.", len(stoppedContainers))
        return nil
    }()

    var stopErr error
    if len(errors) != 0 {
        stopErr = fmt.Errorf(
            "stopContainersAndRun: %d errors stopping containers: %w",
            len(errors),
            err,
        )
    }
    if stopErr != nil {
        return stopErr
    }

    return thunk()
}
// takeBackup creates a tar archive of the configured backup location and
// saves it to disk.
func (s *script) takeBackup() error {
outBytes, err := exec.Command("date", fmt.Sprintf("+%s", s.file)).Output()
if err != nil {
return fmt.Errorf("takeBackup: error formatting filename template: %w", err)
}
s.file = strings.TrimSpace(string(outBytes))
if err := targz.Compress(s.sources, s.file); err != nil {
return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
}
s.logger.Infof("Successfully created backup of `%s` at `%s`.", s.sources, s.file)
return nil
}
// encryptBackup encrypts the backup file using PGP and the configured passphrase.
// In case no passphrase is given it returns early, leaving the backup file
// untouched.
func (s *script) encryptBackup() error {
if s.passphrase == "" {
return nil
}
buf := bytes.NewBuffer(nil)
_, name := path.Split(s.file)
pt, err := openpgp.SymmetricallyEncrypt(buf, []byte(s.passphrase), &openpgp.FileHints{
IsBinary: true,
FileName: name,
}, nil)
if err != nil {
return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
}
unencrypted, err := ioutil.ReadFile(s.file)
if err != nil {
pt.Close()
return fmt.Errorf("encryptBackup: error reading unencrypted backup file: %w", err)
}
_, err = pt.Write(unencrypted)
if err != nil {
pt.Close()
return fmt.Errorf("encryptBackup: error writing backup contents: %w", err)
}
pt.Close()
gpgFile := fmt.Sprintf("%s.gpg", s.file)
if err := ioutil.WriteFile(gpgFile, buf.Bytes(), os.ModeAppend); err != nil {
return fmt.Errorf("encryptBackup: error writing encrypted version of backup: %w", err)
}
if err := os.Remove(s.file); err != nil {
return fmt.Errorf("encryptBackup: error removing unencrpyted backup: %w", err)
}
s.file = gpgFile
s.logger.Infof("Successfully encrypted backup using given passphrase, saving as `%s`.", s.file)
return nil
}
// copyBackup makes sure the backup file is copied to both local and remote locations
// as per the given configuration.
func (s *script) copyBackup() error {
_, name := path.Split(s.file)
if s.bucket != "" {
_, err := s.mc.FPutObject(s.ctx, s.bucket, name, s.file, minio.PutObjectOptions{
ContentType: "application/tar+gzip",
})
if err != nil {
return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
}
s.logger.Infof("Successfully uploaded a copy of backup `%s` to bucket `%s`", s.file, s.bucket)
}
if s.archive != "" {
if _, err := os.Stat(s.archive); !os.IsNotExist(err) {
if err := copy(s.file, path.Join(s.archive, name)); err != nil {
return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
}
}
s.logger.Infof("Successfully stored copy of backup `%s` in local archive `%s`", s.file, s.archive)
}
return nil
}
// cleanBackup removes the backup file from disk.
func (s *script) cleanBackup() error {
if err := os.Remove(s.file); err != nil {
return fmt.Errorf("cleanBackup: error removing file: %w", err)
}
s.logger.Info("Successfully cleaned up local artifacts.")
return nil
}
// pruneOldBackups rotates away backups from local and remote storages using
// the given configuration. In case the given configuration would delete all
// backups, it does nothing instead.
func (s *script) pruneOldBackups() error {
retention := os.Getenv("BACKUP_RETENTION_DAYS")
if retention == "" {
return nil
}
retentionDays, err := strconv.Atoi(retention)
if err != nil {
return fmt.Errorf("pruneOldBackups: error parsing BACKUP_RETENTION_DAYS as int: %w", err)
}
leeway := os.Getenv("BACKUP_PRUNING_LEEWAY")
sleepFor, err := time.ParseDuration(leeway)
if err != nil {
return fmt.Errorf("pruneBackups: error parsing given leeway value: %w", err)
}
s.logger.Infof("Sleeping for %s before pruning backups.", leeway)
time.Sleep(sleepFor)
s.logger.Infof("Trying to prune backups older than %d days now.", retentionDays)
deadline := time.Now().AddDate(0, 0, -retentionDays)
if s.bucket != "" {
candidates := s.mc.ListObjects(s.ctx, s.bucket, minio.ListObjectsOptions{
WithMetadata: true,
Prefix: os.Getenv("BACKUP_PRUNING_PREFIX"),
})
var matches []minio.ObjectInfo
var lenCandidates int
for candidate := range candidates {
lenCandidates++
if candidate.Err != nil {
return fmt.Errorf("pruneOldBackups: error looking up candidates from remote storage: %w", candidate.Err)
}
if candidate.LastModified.Before(deadline) {
matches = append(matches, candidate)
}
}
if len(matches) != 0 && len(matches) != lenCandidates {
objectsCh := make(chan minio.ObjectInfo)
go func() {
for _, match := range matches {
objectsCh <- match
}
close(objectsCh)
}()
errChan := s.mc.RemoveObjects(s.ctx, s.bucket, objectsCh, minio.RemoveObjectsOptions{})
var errors []error
for result := range errChan {
if result.Err != nil {
errors = append(errors, result.Err)
}
}
if len(errors) != 0 {
return fmt.Errorf(
"pruneOldBackups: %d errors removing files from remote storage: %w",
len(errors),
errors[0],
)
}
s.logger.Infof(
"Successfully pruned %d out of %d remote backups as their age exceeded the configured retention period.",
len(matches),
lenCandidates,
)
} else if len(matches) != 0 && len(matches) == lenCandidates {
s.logger.Warnf(
"The current configuration would delete all %d remote backup copies. Refusing to do so, please check your configuration.",
len(matches),
)
} else {
s.logger.Infof("None of %d remote backups were pruned.", lenCandidates)
}
}
if s.archive != "" {
candidates, err := filepath.Glob(
path.Join(s.archive, fmt.Sprintf("%s*", os.Getenv("BACKUP_PRUNING_PREFIX"))),
)
if err != nil {
return fmt.Errorf(
"pruneOldBackups: error looking up matching files, starting with: %w", err,
)
}
var matches []os.FileInfo
for _, candidate := range candidates {
fi, err := os.Stat(candidate)
if err != nil {
return fmt.Errorf(
"pruneOldBackups: error calling stat on file %s: %w",
candidate,
err,
)
}
if fi.ModTime().Before(deadline) {
matches = append(matches, fi)
}
}
if len(matches) != 0 && len(matches) != len(candidates) {
var errors []error
for _, candidate := range matches {
if err := os.Remove(candidate.Name()); err != nil {
errors = append(errors, err)
}
}
if len(errors) != 0 {
return fmt.Errorf(
"pruneOldBackups: %d errors deleting local files, starting with: %w",
len(errors),
errors[0],
)
}
s.logger.Infof(
"Successfully pruned %d out of %d local backups as their age exceeded the configured retention period.",
len(matches),
len(candidates),
)
} else if len(matches) != 0 && len(matches) == len(candidates) {
s.logger.Warnf(
"The current configuration would delete all %d local backup copies. Refusing to do so, please check your configuration.",
len(matches),
)
} else {
s.logger.Infof("None of %d local backups were pruned.", len(candidates))
}
}
return nil
}
func (s *script) must(err error) {
if err != nil {
if s.logger == nil {
panic(err)
}
s.logger.Errorf("Fatal error running backup: %s", err)
os.Exit(1)
}
}
func copy(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
_, err = io.Copy(out, in)
if err != nil {
out.Close()
return err
}
return out.Close()
}


@@ -1,108 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"bytes"
_ "embed"
"errors"
"fmt"
"os"
"text/template"
"time"
sTypes "github.com/containrrr/shoutrrr/pkg/types"
)
//go:embed notifications.tmpl
var defaultNotifications string
// NotificationData data to be passed to the notification templates
type NotificationData struct {
Error error
Config *Config
Stats *Stats
}
// notify sends a notification using the given title and body templates.
// Automatically creates notification data, adding the given error
func (s *script) notify(titleTemplate string, bodyTemplate string, err error) error {
params := NotificationData{
Error: err,
Stats: s.stats,
Config: s.c,
}
titleBuf := &bytes.Buffer{}
if err := s.template.ExecuteTemplate(titleBuf, titleTemplate, params); err != nil {
return fmt.Errorf("notify: error executing %s template: %w", titleTemplate, err)
}
bodyBuf := &bytes.Buffer{}
if err := s.template.ExecuteTemplate(bodyBuf, bodyTemplate, params); err != nil {
return fmt.Errorf("notify: error executing %s template: %w", bodyTemplate, err)
}
if err := s.sendNotification(titleBuf.String(), bodyBuf.String()); err != nil {
return fmt.Errorf("notify: error notifying: %w", err)
}
return nil
}
// notifyFailure sends a notification about a failed backup run
func (s *script) notifyFailure(err error) error {
return s.notify("title_failure", "body_failure", err)
}
// notifySuccess sends a notification about a successful backup run
func (s *script) notifySuccess() error {
return s.notify("title_success", "body_success", nil)
}
// sendNotification sends a notification to all configured third party services
func (s *script) sendNotification(title, body string) error {
var errs []error
for _, result := range s.sender.Send(body, &sTypes.Params{"title": title}) {
if result != nil {
errs = append(errs, result)
}
}
if len(errs) != 0 {
return fmt.Errorf("sendNotification: error sending message: %w", errors.Join(errs...))
}
return nil
}
var templateHelpers = template.FuncMap{
"formatTime": func(t time.Time) string {
return t.Format(time.RFC3339)
},
"formatBytesDec": func(bytes uint64) string {
return formatBytes(bytes, true)
},
"formatBytesBin": func(bytes uint64) string {
return formatBytes(bytes, false)
},
"env": os.Getenv,
}
// formatBytes converts an amount of bytes into a human-readable representation.
// The decimal parameter specifies whether to use powers of 1000 (decimal) or powers of 1024 (binary).
func formatBytes(b uint64, decimal bool) string {
unit := uint64(1024)
format := "%.1f %ciB"
if decimal {
unit = uint64(1000)
format = "%.1f %cB"
}
if b < unit {
return fmt.Sprintf("%d B", b)
}
div, exp := unit, 0
for n := b / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf(format, float64(b)/float64(div), "kMGTPE"[exp])
}
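
For reference, the helper above renders sizes like the following (sample values are arbitrary and only meant to show the two unit systems; this snippet assumes it is compiled alongside the definition above):

// illustrative usage of formatBytes
fmt.Println(formatBytes(1500, true))     // "1.5 kB"  (powers of 1000)
fmt.Println(formatBytes(1048576, false)) // "1.0 MiB" (powers of 1024)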


@@ -1,26 +0,0 @@
{{ define "title_failure" -}}
Failure running docker-volume-backup at {{ .Stats.StartTime | formatTime }}
{{- end }}
{{ define "body_failure" -}}
Running docker-volume-backup failed with error: {{ .Error }}
Log output of the failed run was:
{{ .Stats.LogOutput }}
{{- end }}
{{ define "title_success" -}}
Success running docker-volume-backup at {{ .Stats.StartTime | formatTime }}
{{- end }}
{{ define "body_success" -}}
Running docker-volume-backup succeeded.
Log output was:
{{ .Stats.LogOutput }}
{{- end }}


@@ -1,578 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"text/template"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/offen/docker-volume-backup/internal/storage/azure"
"github.com/offen/docker-volume-backup/internal/storage/local"
"github.com/offen/docker-volume-backup/internal/storage/s3"
"github.com/offen/docker-volume-backup/internal/storage/ssh"
"github.com/offen/docker-volume-backup/internal/storage/webdav"
"github.com/containrrr/shoutrrr"
"github.com/containrrr/shoutrrr/pkg/router"
"github.com/docker/docker/api/types"
ctr "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/kelseyhightower/envconfig"
"github.com/leekchan/timeutil"
"github.com/otiai10/copy"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/openpgp"
"golang.org/x/sync/errgroup"
)
// script holds all the stateful information required to orchestrate a
// single backup run.
type script struct {
cli *client.Client
storages []storage.Backend
logger *logrus.Logger
sender *router.ServiceRouter
template *template.Template
hooks []hook
hookLevel hookLevel
file string
stats *Stats
encounteredLock bool
c *Config
}
// newScript creates all resources needed for the script to perform actions against
// remote resources like the Docker engine or remote storage locations. All
// reading from env vars or other configuration sources is expected to happen
// in this method.
func newScript() (*script, error) {
stdOut, logBuffer := buffer(os.Stdout)
s := &script{
c: &Config{},
logger: &logrus.Logger{
Out: stdOut,
Formatter: new(logrus.TextFormatter),
Hooks: make(logrus.LevelHooks),
Level: logrus.InfoLevel,
},
stats: &Stats{
StartTime: time.Now(),
LogOutput: logBuffer,
Storages: map[string]StorageStats{
"S3": {},
"WebDAV": {},
"SSH": {},
"Local": {},
"Azure": {},
},
},
}
s.registerHook(hookLevelPlumbing, func(error) error {
s.stats.EndTime = time.Now()
s.stats.TookTime = s.stats.EndTime.Sub(s.stats.StartTime)
return nil
})
if err := envconfig.Process("", s.c); err != nil {
return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
}
s.file = path.Join("/tmp", s.c.BackupFilename)
if s.c.BackupFilenameExpand {
s.file = os.ExpandEnv(s.file)
s.c.BackupLatestSymlink = os.ExpandEnv(s.c.BackupLatestSymlink)
s.c.BackupPruningPrefix = os.ExpandEnv(s.c.BackupPruningPrefix)
}
s.file = timeutil.Strftime(&s.stats.StartTime, s.file)
_, err := os.Stat("/var/run/docker.sock")
_, dockerHostSet := os.LookupEnv("DOCKER_HOST")
if !os.IsNotExist(err) || dockerHostSet {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, fmt.Errorf("newScript: failed to create docker client")
}
s.cli = cli
}
logFunc := func(logType storage.LogLevel, context string, msg string, params ...any) {
switch logType {
case storage.LogLevelWarning:
s.logger.Warnf("["+context+"] "+msg, params...)
case storage.LogLevelError:
s.logger.Errorf("["+context+"] "+msg, params...)
case storage.LogLevelInfo:
default:
s.logger.Infof("["+context+"] "+msg, params...)
}
}
if s.c.AwsS3BucketName != "" {
accessKeyID, err := s.c.resolveSecret(s.c.AwsAccessKeyID, s.c.AwsAccessKeyIDFile)
if err != nil {
return nil, fmt.Errorf("newScript: error resolving AwsAccessKeyID: %w", err)
}
secretAccessKey, err := s.c.resolveSecret(s.c.AwsSecretAccessKey, s.c.AwsSecretAccessKeyFile)
if err != nil {
return nil, fmt.Errorf("newScript: error resolving AwsSecretAccessKey: %w", err)
}
s3Config := s3.Config{
Endpoint: s.c.AwsEndpoint,
AccessKeyID: accessKeyID,
SecretAccessKey: secretAccessKey,
IamRoleEndpoint: s.c.AwsIamRoleEndpoint,
EndpointProto: s.c.AwsEndpointProto,
EndpointInsecure: s.c.AwsEndpointInsecure,
RemotePath: s.c.AwsS3Path,
BucketName: s.c.AwsS3BucketName,
StorageClass: s.c.AwsStorageClass,
CACert: s.c.AwsEndpointCACert.Cert,
PartSize: s.c.AwsPartSize,
}
if s3Backend, err := s3.NewStorageBackend(s3Config, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, s3Backend)
}
}
if s.c.WebdavUrl != "" {
webDavConfig := webdav.Config{
URL: s.c.WebdavUrl,
URLInsecure: s.c.WebdavUrlInsecure,
Username: s.c.WebdavUsername,
Password: s.c.WebdavPassword,
RemotePath: s.c.WebdavPath,
}
if webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, webdavBackend)
}
}
if s.c.SSHHostName != "" {
sshConfig := ssh.Config{
HostName: s.c.SSHHostName,
Port: s.c.SSHPort,
User: s.c.SSHUser,
Password: s.c.SSHPassword,
IdentityFile: s.c.SSHIdentityFile,
IdentityPassphrase: s.c.SSHIdentityPassphrase,
RemotePath: s.c.SSHRemotePath,
}
if sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc); err != nil {
return nil, err
} else {
s.storages = append(s.storages, sshBackend)
}
}
if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
localConfig := local.Config{
ArchivePath: s.c.BackupArchive,
LatestSymlink: s.c.BackupLatestSymlink,
}
localBackend := local.NewStorageBackend(localConfig, logFunc)
s.storages = append(s.storages, localBackend)
}
if s.c.AzureStorageAccountName != "" {
azureConfig := azure.Config{
ContainerName: s.c.AzureStorageContainerName,
AccountName: s.c.AzureStorageAccountName,
PrimaryAccountKey: s.c.AzureStoragePrimaryAccountKey,
Endpoint: s.c.AzureStorageEndpoint,
RemotePath: s.c.AzureStoragePath,
}
azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
if err != nil {
return nil, err
}
s.storages = append(s.storages, azureBackend)
}
if s.c.EmailNotificationRecipient != "" {
emailURL := fmt.Sprintf(
"smtp://%s:%s@%s:%d/?from=%s&to=%s",
s.c.EmailSMTPUsername,
s.c.EmailSMTPPassword,
s.c.EmailSMTPHost,
s.c.EmailSMTPPort,
s.c.EmailNotificationSender,
s.c.EmailNotificationRecipient,
)
s.c.NotificationURLs = append(s.c.NotificationURLs, emailURL)
s.logger.Warn(
"Using EMAIL_* keys for providing notification configuration has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use NOTIFICATION_URLS instead. Refer to the README for an upgrade guide.",
)
}
hookLevel, ok := hookLevels[s.c.NotificationLevel]
if !ok {
return nil, fmt.Errorf("newScript: unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel)
}
s.hookLevel = hookLevel
if len(s.c.NotificationURLs) > 0 {
sender, senderErr := shoutrrr.CreateSender(s.c.NotificationURLs...)
if senderErr != nil {
return nil, fmt.Errorf("newScript: error creating sender: %w", senderErr)
}
s.sender = sender
tmpl := template.New("")
tmpl.Funcs(templateHelpers)
tmpl, err = tmpl.Parse(defaultNotifications)
if err != nil {
return nil, fmt.Errorf("newScript: unable to parse default notifications templates: %w", err)
}
if fi, err := os.Stat("/etc/dockervolumebackup/notifications.d"); err == nil && fi.IsDir() {
tmpl, err = tmpl.ParseGlob("/etc/dockervolumebackup/notifications.d/*.*")
if err != nil {
return nil, fmt.Errorf("newScript: unable to parse user defined notifications templates: %w", err)
}
}
s.template = tmpl
// To prevent duplicate notifications, ensure the registered callbacks
// run mutually exclusively.
s.registerHook(hookLevelError, func(err error) error {
if err == nil {
return nil
}
return s.notifyFailure(err)
})
s.registerHook(hookLevelInfo, func(err error) error {
if err != nil {
return nil
}
return s.notifySuccess()
})
}
return s, nil
}
// stopContainers stops all Docker containers that are marked to be stopped
// during the backup and returns a function that can be called to
// restart everything that has been stopped.
func (s *script) stopContainers() (func() error, error) {
if s.cli == nil {
return noop, nil
}
allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
return noop, fmt.Errorf("stopContainers: error querying for containers: %w", err)
}
containerLabel := fmt.Sprintf(
"docker-volume-backup.stop-during-backup=%s",
s.c.BackupStopContainerLabel,
)
containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Filters: filters.NewArgs(filters.KeyValuePair{
Key: "label",
Value: containerLabel,
}),
})
if err != nil {
return noop, fmt.Errorf("stopContainers: error querying for containers to stop: %w", err)
}
if len(containersToStop) == 0 {
return noop, nil
}
s.logger.Infof(
"Stopping %d container(s) labeled `%s` out of %d running container(s).",
len(containersToStop),
containerLabel,
len(allContainers),
)
var stoppedContainers []types.Container
var stopErrors []error
for _, container := range containersToStop {
if err := s.cli.ContainerStop(context.Background(), container.ID, ctr.StopOptions{}); err != nil {
stopErrors = append(stopErrors, err)
} else {
stoppedContainers = append(stoppedContainers, container)
}
}
var stopError error
if len(stopErrors) != 0 {
stopError = fmt.Errorf(
"stopContainers: %d error(s) stopping containers: %w",
len(stopErrors),
errors.Join(stopErrors...),
)
}
s.stats.Containers = ContainersStats{
All: uint(len(allContainers)),
ToStop: uint(len(containersToStop)),
Stopped: uint(len(stoppedContainers)),
}
return func() error {
servicesRequiringUpdate := map[string]struct{}{}
var restartErrors []error
for _, container := range stoppedContainers {
if swarmServiceName, ok := container.Labels["com.docker.swarm.service.name"]; ok {
servicesRequiringUpdate[swarmServiceName] = struct{}{}
continue
}
if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
restartErrors = append(restartErrors, err)
}
}
if len(servicesRequiringUpdate) != 0 {
services, _ := s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
for serviceName := range servicesRequiringUpdate {
var serviceMatch swarm.Service
for _, service := range services {
if service.Spec.Name == serviceName {
serviceMatch = service
break
}
}
if serviceMatch.ID == "" {
return fmt.Errorf("stopContainers: couldn't find service with name %s", serviceName)
}
serviceMatch.Spec.TaskTemplate.ForceUpdate += 1
if _, err := s.cli.ServiceUpdate(
context.Background(), serviceMatch.ID,
serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
); err != nil {
restartErrors = append(restartErrors, err)
}
}
}
if len(restartErrors) != 0 {
return fmt.Errorf(
"stopContainers: %d error(s) restarting containers and services: %w",
len(restartErrors),
errors.Join(restartErrors...),
)
}
s.logger.Infof(
"Restarted %d container(s) and the matching service(s).",
len(stoppedContainers),
)
return nil
}, stopError
}
// createArchive creates a tar archive of the configured backup location and
// saves it to disk.
func (s *script) createArchive() error {
backupSources := s.c.BackupSources
if s.c.BackupFromSnapshot {
s.logger.Warn(
"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the README for an upgrade guide.",
)
backupSources = filepath.Join("/tmp", s.c.BackupSources)
// copy before compressing to guard against a situation where the backup folder's contents are still growing.
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(backupSources); err != nil {
return fmt.Errorf("createArchive: error removing snapshot: %w", err)
}
s.logger.Infof("Removed snapshot `%s`.", backupSources)
return nil
})
if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
PreserveTimes: true,
PreserveOwner: true,
}); err != nil {
return fmt.Errorf("createArchive: error creating snapshot: %w", err)
}
s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
}
tarFile := s.file
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(tarFile); err != nil {
return fmt.Errorf("createArchive: error removing tar file: %w", err)
}
s.logger.Infof("Removed tar file `%s`.", tarFile)
return nil
})
backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
if err != nil {
return fmt.Errorf("createArchive: error getting absolute path: %w", err)
}
var filesEligibleForBackup []string
if err := filepath.WalkDir(backupPath, func(path string, di fs.DirEntry, err error) error {
if err != nil {
return err
}
if s.c.BackupExcludeRegexp.Re != nil && s.c.BackupExcludeRegexp.Re.MatchString(path) {
return nil
}
filesEligibleForBackup = append(filesEligibleForBackup, path)
return nil
}); err != nil {
return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
}
if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
}
s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
return nil
}
// encryptArchive encrypts the backup file using PGP and the configured passphrase.
// In case no passphrase is given it returns early, leaving the backup file
// untouched.
func (s *script) encryptArchive() error {
if s.c.GpgPassphrase == "" {
return nil
}
gpgFile := fmt.Sprintf("%s.gpg", s.file)
s.registerHook(hookLevelPlumbing, func(error) error {
if err := remove(gpgFile); err != nil {
return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
}
s.logger.Infof("Removed GPG file `%s`.", gpgFile)
return nil
})
outFile, err := os.Create(gpgFile)
if err != nil {
return fmt.Errorf("encryptArchive: error opening out file: %w", err)
}
defer outFile.Close()
_, name := path.Split(s.file)
dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
IsBinary: true,
FileName: name,
}, nil)
if err != nil {
return fmt.Errorf("encryptArchive: error encrypting backup file: %w", err)
}
defer dst.Close()
src, err := os.Open(s.file)
if err != nil {
return fmt.Errorf("encryptArchive: error opening backup file `%s`: %w", s.file, err)
}
if _, err := io.Copy(dst, src); err != nil {
return fmt.Errorf("encryptArchive: error writing ciphertext to file: %w", err)
}
s.file = gpgFile
s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
return nil
}
// copyArchive makes sure the backup file is copied to both local and remote locations
// as per the given configuration.
func (s *script) copyArchive() error {
_, name := path.Split(s.file)
if stat, err := os.Stat(s.file); err != nil {
return fmt.Errorf("copyArchive: unable to stat backup file: %w", err)
} else {
size := stat.Size()
s.stats.BackupFile = BackupFileStats{
Size: uint64(size),
Name: name,
FullPath: s.file,
}
}
eg := errgroup.Group{}
for _, backend := range s.storages {
b := backend
eg.Go(func() error {
return b.Copy(s.file)
})
}
if err := eg.Wait(); err != nil {
return fmt.Errorf("copyArchive: error copying archive: %w", err)
}
return nil
}
// pruneBackups rotates away backups from local and remote storages using
// the given configuration. In case the given configuration would delete all
// backups, it does nothing instead and logs a warning.
func (s *script) pruneBackups() error {
if s.c.BackupRetentionDays < 0 {
return nil
}
deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)
eg := errgroup.Group{}
for _, backend := range s.storages {
b := backend
eg.Go(func() error {
stats, err := b.Prune(deadline, s.c.BackupPruningPrefix)
if err != nil {
return err
}
s.stats.Lock()
s.stats.Storages[b.Name()] = StorageStats{
Total: stats.Total,
Pruned: stats.Pruned,
}
s.stats.Unlock()
return nil
})
}
if err := eg.Wait(); err != nil {
return fmt.Errorf("pruneBackups: error pruning backups: %w", err)
}
return nil
}
// must exits the script run prematurely in case the given error
// is non-nil.
func (s *script) must(err error) {
if err != nil {
s.logger.Errorf("Fatal error running backup: %s", err)
panic(err)
}
}

View File

@@ -1,45 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"bytes"
"sync"
"time"
)
// ContainersStats stats about the docker containers
type ContainersStats struct {
All uint
ToStop uint
Stopped uint
StopErrors uint
}
// BackupFileStats stats about the created backup file
type BackupFileStats struct {
Name string
FullPath string
Size uint64
}
// StorageStats stats about the status of an archival directory
type StorageStats struct {
Total uint
Pruned uint
PruneErrors uint
}
// Stats global stats regarding script execution
type Stats struct {
sync.Mutex
StartTime time.Time
EndTime time.Time
TookTime time.Duration
LockedTime time.Duration
LogOutput *bytes.Buffer
Containers ContainersStats
BackupFile BackupFileStats
Storages map[string]StorageStats
}

View File

@@ -1,52 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"bytes"
"fmt"
"io"
"os"
)
var noop = func() error { return nil }
// remove removes the given file or directory from disk.
func remove(location string) error {
fi, err := os.Lstat(location)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
}
if fi.IsDir() {
err = os.RemoveAll(location)
} else {
err = os.Remove(location)
}
if err != nil {
return fmt.Errorf("remove: error removing `%s`: %w", location, err)
}
return nil
}
// buffer takes an io.Writer and returns a wrapped version of the
// writer that writes to both the original target as well as the returned buffer
func buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
return buffering, &buffering.buf
}
type bufferingWriter struct {
buf bytes.Buffer
writer io.Writer
}
func (b *bufferingWriter) Write(p []byte) (n int, err error) {
if n, err := b.buf.Write(p); err != nil {
return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
}
return b.writer.Write(p)
}

View File

@@ -1,40 +0,0 @@
# Notification templates reference
In order to customize title and body of notifications you'll have to write a [go template](https://pkg.go.dev/text/template) and mount it inside the `/etc/dockervolumebackup/notifications.d/` directory.
Configuration, data about the backup run and helper functions are passed to this template; this page documents them fully.
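For example, assuming a template file called `notifications.tmpl` next to your compose file (file name and image tag here are illustrative), it could be mounted like this:

```yml
services:
  backup:
    image: offen/docker-volume-backup:v2
    volumes:
      - ./notifications.tmpl:/etc/dockervolumebackup/notifications.d/notifications.tmpl
```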
## Data
Here is a list of all data passed to the template:
* `Config`: this object holds the configuration that has been passed to the script. The field names are the names of the recognized environment variables converted to PascalCase (e.g. `BACKUP_STOP_CONTAINER_LABEL` becomes `BackupStopContainerLabel`)
* `Error`: the error that made the backup fail. Only available in the `title_failure` and `body_failure` templates
* `Stats`: object that holds stats regarding script execution. In case of an unsuccessful run, some information may not be available.
  * `StartTime`: time when the script started execution
  * `EndTime`: time when the backup has completed successfully (after pruning)
  * `TookTime`: amount of time it took for the backup to run (equal to `EndTime - StartTime`)
  * `LockedTime`: amount of time it took for the backup to acquire the exclusive lock
  * `LogOutput`: full log of the application
  * `Containers`: object containing stats about the Docker containers
    * `All`: total number of containers
    * `ToStop`: number of containers matched by the stop rule
    * `Stopped`: number of containers successfully stopped
    * `StopErrors`: number of containers that could not be stopped (equal to `ToStop - Stopped`)
  * `BackupFile`: object containing information about the backup file
    * `Name`: name of the backup file (e.g. `backup-2022-02-11T01-00-00.tar.gz`)
    * `FullPath`: full path of the backup file (e.g. `/archive/backup-2022-02-11T01-00-00.tar.gz`)
    * `Size`: size in bytes of the backup file
  * `Storages`: object that holds stats about each storage
    * `Local`, `S3`, `WebDAV`, `Azure` or `SSH`:
      * `Total`: total number of backup files
      * `Pruned`: number of backup files that were deleted due to the pruning rule
      * `PruneErrors`: number of backup files that could not be pruned
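As a rough sketch, a custom success body could reference some of these fields like so (the wording is just an example):

```
{{ define "body_success" -}}
Backed up {{ .Stats.BackupFile.FullPath }} ({{ .Stats.BackupFile.Size }} bytes) in {{ .Stats.TookTime }}.
{{- end }}
```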
## Functions
Some formatting and helper functions are also available:
* `formatTime`: formats a time object using [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339) format (e.g. `2022-02-11T01:00:00Z`)
* `formatBytesBin`: formats an amount of bytes using powers of 1024 (e.g. `7055258` bytes will be `6.7 MiB`)
* `formatBytesDec`: formats an amount of bytes using powers of 1000 (e.g. `7055258` bytes will be `7.1 MB`)
* `env`: returns the value of the environment variable of the given key if set
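These helpers are invoked using regular Go template syntax, so a title template might combine them like this (illustrative only):

```
{{ define "title_success" -}}
Backup finished at {{ formatTime .Stats.EndTime }}, archive size {{ formatBytesBin .Stats.BackupFile.Size }}
{{- end }}
```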

View File

@@ -3,31 +3,38 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0
+# Portions of this file are taken from github.com/futurice/docker-volume-backup
+# See NOTICE for information about authors and licensing.
 set -e
-if [ ! -d "/etc/dockervolumebackup/conf.d" ]; then
-  BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
-  echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
-  echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
-else
-  echo "/etc/dockervolumebackup/conf.d was found, using configuration files from this directory."
-  crontab -r && crontab /dev/null
-  for file in /etc/dockervolumebackup/conf.d/*; do
-    source $file
-    BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
-    echo "Appending cron.d entry with expression $BACKUP_CRON_EXPRESSION and configuration file $file"
-    (crontab -l; echo "$BACKUP_CRON_EXPRESSION /bin/sh -c 'set -a; source $file; set +a && backup' 2>&1") | crontab -
-  done
-fi
-if [ ! -z "$SERVE_METRICS_PATH" ]; then
-  mkdir -p /var/www/html${SERVE_METRICS_PATH}
-  echo "ok" > /var/www/html${SERVE_METRICS_PATH}/metrics.txt
-  httpd -h /var/www/html -p "${SERVE_METRICS_PORT:-80}"
-  echo "Serving metrics on port ${SERVE_METRICS_PORT:-80}."
-fi
+# Write cronjob env to file, fill in sensible defaults, and read them back in
+mkdir -p /etc/backup
+cat <<EOF > /etc/backup.env
+BACKUP_SOURCES="${BACKUP_SOURCES:-/backup}"
+BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
+BACKUP_FILENAME="${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}"
+BACKUP_ARCHIVE="${BACKUP_ARCHIVE:-/archive}"
+BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
+BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
+BACKUP_PRUNING_PREFIX="${BACKUP_PRUNING_PREFIX:-}"
+BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
+AWS_S3_BUCKET_NAME="${AWS_S3_BUCKET_NAME:-}"
+AWS_ENDPOINT="${AWS_ENDPOINT:-s3.amazonaws.com}"
+AWS_ENDPOINT_PROTO="${AWS_ENDPOINT_PROTO:-https}"
+AWS_ENDPOINT_INSECURE="${AWS_ENDPOINT_INSECURE:-}"
+GPG_PASSPHRASE="${GPG_PASSPHRASE:-}"
+EOF
+chmod a+x /etc/backup.env
+source /etc/backup.env
+# Add our cron entry, and direct stdout & stderr to Docker commands stdout
+echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
+echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
+# Let cron take the wheel
 echo "Starting cron in foreground."
-crond -f -d 8
+crond -f -l 8

74
go.mod
View File

@@ -1,61 +1,43 @@
 module github.com/offen/docker-volume-backup
-go 1.19
+go 1.17
 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0
-	github.com/containrrr/shoutrrr v0.7.1
-	github.com/cosiner/argv v0.1.0
-	github.com/docker/docker v24.0.5+incompatible
-	github.com/gofrs/flock v0.8.1
-	github.com/kelseyhightower/envconfig v1.4.0
-	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
-	github.com/minio/minio-go/v7 v7.0.61
-	github.com/otiai10/copy v1.11.0
-	github.com/pkg/sftp v1.13.5
-	github.com/sirupsen/logrus v1.9.3
-	github.com/studio-b12/gowebdav v0.9.0
-	golang.org/x/crypto v0.11.0
-	golang.org/x/sync v0.3.0
+	github.com/docker/docker v20.10.8+incompatible
+	github.com/joho/godotenv v1.3.0
+	github.com/minio/minio-go/v7 v7.0.12
+	github.com/walle/targz v0.0.0-20140417120357-57fe4206da5a
+	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
 )
 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
-	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/docker/distribution v2.8.2+incompatible // indirect
+	github.com/Microsoft/go-winio v0.4.17 // indirect
+	github.com/containerd/containerd v1.5.5 // indirect
+	github.com/docker/distribution v2.7.1+incompatible // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
-	github.com/dustin/go-humanize v1.0.1 // indirect
-	github.com/fatih/color v1.13.0 // indirect
+	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
-	github.com/google/uuid v1.3.0 // indirect
-	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.16.7 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
-	github.com/kr/fs v0.1.0 // indirect
-	github.com/kylelemons/godebug v1.1.0 // indirect
-	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.16 // indirect
-	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/sha256-simd v1.0.1 // indirect
-	github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
+	github.com/golang/protobuf v1.5.0 // indirect
+	github.com/google/uuid v1.2.0 // indirect
+	github.com/json-iterator/go v1.1.10 // indirect
+	github.com/klauspost/cpuid v1.3.1 // indirect
+	github.com/minio/md5-simd v1.1.0 // indirect
+	github.com/minio/sha256-simd v0.1.1 // indirect
+	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/modern-go/reflect2 v1.0.1 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
-	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
-	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
+	github.com/opencontainers/image-spec v1.0.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/rs/xid v1.5.0 // indirect
-	golang.org/x/net v0.12.0 // indirect
-	golang.org/x/sys v0.10.0 // indirect
-	golang.org/x/text v0.11.0 // indirect
-	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
-	gopkg.in/ini.v1 v1.67.0 // indirect
-	gotest.tools/v3 v3.0.3 // indirect
+	github.com/rs/xid v1.2.1 // indirect
+	github.com/sirupsen/logrus v1.8.1 // indirect
+	golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect
+	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
+	golang.org/x/text v0.3.4 // indirect
+	google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect
+	google.golang.org/grpc v1.33.2 // indirect
+	google.golang.org/protobuf v1.26.0 // indirect
+	gopkg.in/ini.v1 v1.57.0 // indirect
 )

1327
go.sum

File diff suppressed because it is too large

View File

@@ -1,160 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package azure
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"text/template"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/offen/docker-volume-backup/internal/storage"
)
type azureBlobStorage struct {
*storage.StorageBackend
client *azblob.Client
containerName string
}
// Config contains values that define the configuration of an Azure Blob Storage.
type Config struct {
AccountName string
ContainerName string
PrimaryAccountKey string
Endpoint string
RemotePath string
}
// NewStorageBackend creates and initializes a new Azure Blob Storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
endpointTemplate, err := template.New("endpoint").Parse(opts.Endpoint)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error parsing endpoint template: %w", err)
}
var ep bytes.Buffer
if err := endpointTemplate.Execute(&ep, opts); err != nil {
return nil, fmt.Errorf("NewStorageBackend: error executing endpoint template: %w", err)
}
normalizedEndpoint := fmt.Sprintf("%s/", strings.TrimSuffix(ep.String(), "/"))
var client *azblob.Client
if opts.PrimaryAccountKey != "" {
cred, err := azblob.NewSharedKeyCredential(opts.AccountName, opts.PrimaryAccountKey)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating shared key Azure credential: %w", err)
}
client, err = azblob.NewClientWithSharedKeyCredential(normalizedEndpoint, cred, nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
}
} else {
cred, err := azidentity.NewManagedIdentityCredential(nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating managed identity credential: %w", err)
}
client, err = azblob.NewClient(normalizedEndpoint, cred, nil)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
}
}
storage := azureBlobStorage{
client: client,
containerName: opts.ContainerName,
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
}
return &storage, nil
}
// Name returns the name of the storage backend
func (b *azureBlobStorage) Name() string {
return "Azure"
}
// Copy copies the given file to the storage backend.
func (b *azureBlobStorage) Copy(file string) error {
fileReader, err := os.Open(file)
if err != nil {
return fmt.Errorf("(*azureBlobStorage).Copy: error opening file %s: %w", file, err)
}
_, err = b.client.UploadStream(
context.Background(),
b.containerName,
filepath.Join(b.DestinationPath, filepath.Base(file)),
fileReader,
nil,
)
if err != nil {
return fmt.Errorf("(*azureBlobStorage).Copy: error uploading file %s: %w", file, err)
}
return nil
}
// Prune rotates away backups according to the configuration and provided
// deadline for the Azure Blob storage backend.
func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
lookupPrefix := filepath.Join(b.DestinationPath, pruningPrefix)
pager := b.client.NewListBlobsFlatPager(b.containerName, &container.ListBlobsFlatOptions{
Prefix: &lookupPrefix,
})
var matches []string
var totalCount uint
for pager.More() {
resp, err := pager.NextPage(context.Background())
if err != nil {
return nil, fmt.Errorf("(*azureBlobStorage).Prune: error paging over blobs: %w", err)
}
for _, v := range resp.Segment.BlobItems {
totalCount++
if v.Properties.LastModified.Before(deadline) {
matches = append(matches, *v.Name)
}
}
}
stats := storage.PruneStats{
Total: totalCount,
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), int(totalCount), "Azure Blob Storage backup(s)", func() error {
wg := sync.WaitGroup{}
wg.Add(len(matches))
var errs []error
for _, match := range matches {
name := match
go func() {
_, err := b.client.DeleteBlob(context.Background(), b.containerName, name, nil)
if err != nil {
errs = append(errs, err)
}
wg.Done()
}()
}
wg.Wait()
if len(errs) != 0 {
return errors.Join(errs...)
}
return nil
}); err != nil {
return &stats, err
}
return &stats, nil
}

View File

@@ -1,160 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package local
import (
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
)
type localStorage struct {
*storage.StorageBackend
latestSymlink string
}
// Config allows configuration of a local storage backend.
type Config struct {
ArchivePath string
LatestSymlink string
}
// NewStorageBackend creates and initializes a new local storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) storage.Backend {
return &localStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.ArchivePath,
Log: logFunc,
},
latestSymlink: opts.LatestSymlink,
}
}
// Name returns the name of the storage backend
func (b *localStorage) Name() string {
return "Local"
}
// Copy copies the given file to the local storage backend.
func (b *localStorage) Copy(file string) error {
_, name := path.Split(file)
if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
return fmt.Errorf("(*localStorage).Copy: Error copying file to local archive: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
if b.latestSymlink != "" {
symlink := path.Join(b.DestinationPath, b.latestSymlink)
if _, err := os.Lstat(symlink); err == nil {
os.Remove(symlink)
}
if err := os.Symlink(name, symlink); err != nil {
return fmt.Errorf("(*localStorage).Copy: error creating latest symlink: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
}
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the local storage backend.
func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
globPattern := path.Join(
b.DestinationPath,
fmt.Sprintf("%s*", pruningPrefix),
)
globMatches, err := filepath.Glob(globPattern)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error looking up matching files using pattern %s: %w",
globPattern,
err,
)
}
var candidates []string
for _, candidate := range globMatches {
fi, err := os.Lstat(candidate)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error calling Lstat on file %s: %w",
candidate,
err,
)
}
if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
candidates = append(candidates, candidate)
}
}
var matches []string
for _, candidate := range candidates {
fi, err := os.Stat(candidate)
if err != nil {
return nil, fmt.Errorf(
"(*localStorage).Prune: Error calling stat on file %s: %w",
candidate,
err,
)
}
if fi.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error {
var removeErrors []error
for _, match := range matches {
if err := os.Remove(match); err != nil {
removeErrors = append(removeErrors, err)
}
}
if len(removeErrors) != 0 {
return fmt.Errorf(
"(*localStorage).Prune: %d error(s) deleting local files, starting with: %w",
len(removeErrors),
errors.Join(removeErrors...),
)
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}
// copyFile creates a copy of the file located at `src` at `dst`.
func copyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
_, err = io.Copy(out, in)
if err != nil {
out.Close()
return err
}
return out.Close()
}

View File

@@ -1,189 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package s3
import (
"context"
"crypto/x509"
"errors"
"fmt"
"os"
"path"
"path/filepath"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/offen/docker-volume-backup/internal/storage"
)
type s3Storage struct {
*storage.StorageBackend
client *minio.Client
bucket string
storageClass string
partSize int64
}
// Config contains values that define the configuration of an S3 backend.
type Config struct {
Endpoint string
AccessKeyID string
SecretAccessKey string
IamRoleEndpoint string
EndpointProto string
EndpointInsecure bool
RemotePath string
BucketName string
StorageClass string
PartSize int64
CACert *x509.Certificate
}
// NewStorageBackend creates and initializes a new S3/Minio storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
var creds *credentials.Credentials
if opts.AccessKeyID != "" && opts.SecretAccessKey != "" {
creds = credentials.NewStaticV4(
opts.AccessKeyID,
opts.SecretAccessKey,
"",
)
} else if opts.IamRoleEndpoint != "" {
creds = credentials.NewIAM(opts.IamRoleEndpoint)
} else {
return nil, errors.New("NewStorageBackend: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
}
options := minio.Options{
Creds: creds,
Secure: opts.EndpointProto == "https",
}
transport, err := minio.DefaultTransport(true)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: failed to create default minio transport: %w", err)
}
if opts.EndpointInsecure {
if !options.Secure {
return nil, errors.New("NewStorageBackend: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
}
transport.TLSClientConfig.InsecureSkipVerify = true
} else if opts.CACert != nil {
if transport.TLSClientConfig.RootCAs == nil {
transport.TLSClientConfig.RootCAs = x509.NewCertPool()
}
transport.TLSClientConfig.RootCAs.AddCert(opts.CACert)
}
options.Transport = transport
mc, err := minio.New(opts.Endpoint, &options)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error setting up minio client: %w", err)
}
return &s3Storage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: mc,
bucket: opts.BucketName,
storageClass: opts.StorageClass,
partSize: opts.PartSize,
}, nil
}
// Name returns the name of the storage backend
func (v *s3Storage) Name() string {
return "S3"
}
// Copy copies the given file to the S3/Minio storage backend.
func (b *s3Storage) Copy(file string) error {
_, name := path.Split(file)
putObjectOptions := minio.PutObjectOptions{
ContentType: "application/tar+gzip",
StorageClass: b.storageClass,
}
if b.partSize > 0 {
srcFileInfo, err := os.Stat(file)
if err != nil {
return fmt.Errorf("(*s3Storage).Copy: error reading the local file: %w", err)
}
_, partSize, _, err := minio.OptimalPartInfo(srcFileInfo.Size(), uint64(b.partSize*1024*1024))
if err != nil {
return fmt.Errorf("(*s3Storage).Copy: error computing the optimal s3 part size: %w", err)
}
putObjectOptions.PartSize = uint64(partSize)
}
if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
}
return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend.
func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates := b.client.ListObjects(context.Background(), b.bucket, minio.ListObjectsOptions{
Prefix: filepath.Join(b.DestinationPath, pruningPrefix),
Recursive: true,
})
var matches []minio.ObjectInfo
var lenCandidates int
for candidate := range candidates {
lenCandidates++
if candidate.Err != nil {
return nil, fmt.Errorf(
"(*s3Storage).Prune: Error looking up candidates from remote storage! %w",
candidate.Err,
)
}
if candidate.LastModified.Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error {
objectsCh := make(chan minio.ObjectInfo)
go func() {
for _, match := range matches {
objectsCh <- match
}
close(objectsCh)
}()
errChan := b.client.RemoveObjects(context.Background(), b.bucket, objectsCh, minio.RemoveObjectsOptions{})
var removeErrors []error
for result := range errChan {
if result.Err != nil {
removeErrors = append(removeErrors, result.Err)
}
}
if len(removeErrors) != 0 {
return errors.Join(removeErrors...)
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}

View File

@@ -1,190 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package ssh
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
type sshStorage struct {
*storage.StorageBackend
client *ssh.Client
sftpClient *sftp.Client
hostName string
}
// Config allows configuring an SSH backend.
type Config struct {
HostName string
Port string
User string
Password string
IdentityFile string
IdentityPassphrase string
RemotePath string
}
// NewStorageBackend creates and initializes a new SSH storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
var authMethods []ssh.AuthMethod
if opts.Password != "" {
authMethods = append(authMethods, ssh.Password(opts.Password))
}
if _, err := os.Stat(opts.IdentityFile); err == nil {
key, err := ioutil.ReadFile(opts.IdentityFile)
if err != nil {
return nil, errors.New("NewStorageBackend: error reading the private key")
}
var signer ssh.Signer
if opts.IdentityPassphrase != "" {
signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(opts.IdentityPassphrase))
if err != nil {
return nil, errors.New("NewStorageBackend: error parsing the encrypted private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
} else {
signer, err = ssh.ParsePrivateKey(key)
if err != nil {
return nil, errors.New("NewStorageBackend: error parsing the private key")
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
}
}
sshClientConfig := &ssh.ClientConfig{
User: opts.User,
Auth: authMethods,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: Error creating ssh client: %w", err)
}
_, _, err = sshClient.SendRequest("keepalive", false, nil)
if err != nil {
return nil, err
}
sftpClient, err := sftp.NewClient(sshClient)
if err != nil {
return nil, fmt.Errorf("NewStorageBackend: error creating sftp client: %w", err)
}
return &sshStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: sshClient,
sftpClient: sftpClient,
hostName: opts.HostName,
}, nil
}
// Name returns the name of the storage backend
func (b *sshStorage) Name() string {
return "SSH"
}
// Copy copies the given file to the SSH storage backend.
func (b *sshStorage) Copy(file string) error {
source, err := os.Open(file)
_, name := path.Split(file)
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error reading the file to be uploaded: %w", err)
}
defer source.Close()
destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error creating file on SSH storage: %w", err)
}
defer destination.Close()
chunk := make([]byte, 1000000)
for {
num, err := source.Read(chunk)
if err == io.EOF {
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
if tot != len(chunk[:num]) {
return errors.New("(*sshStorage).Copy: failed to write stream")
}
break
}
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
tot, err := destination.Write(chunk[:num])
if err != nil {
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
}
if tot != len(chunk[:num]) {
return fmt.Errorf("(*sshStorage).Copy: failed to write stream")
}
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the SSH storage backend.
func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
if err != nil {
return nil, fmt.Errorf("(*sshStorage).Prune: Error reading directory from SSH storage: %w", err)
}
var matches []string
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
continue
}
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate.Name())
}
}
stats := &storage.PruneStats{
Total: uint(len(candidates)),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error {
for _, match := range matches {
if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
return fmt.Errorf("(*sshStorage).Prune: Error removing file from SSH storage: %w", err)
}
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}

View File

@@ -1,61 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package storage
import (
"time"
)
// Backend is an interface for defining functions which all storage providers support.
type Backend interface {
Copy(file string) error
Prune(deadline time.Time, pruningPrefix string) (*PruneStats, error)
Name() string
}
// StorageBackend is a generic type of storage. Everything here are common properties of all storage types.
type StorageBackend struct {
DestinationPath string
RetentionDays int
Log Log
}
type LogLevel int
const (
LogLevelInfo LogLevel = iota
LogLevelWarning
LogLevelError
)
type Log func(logType LogLevel, context string, msg string, params ...any)
// PruneStats is a wrapper struct for returning stats after pruning
type PruneStats struct {
Total uint
Pruned uint
}
// DoPrune holds general control flow that applies to any kind of storage.
// Callers can pass in a thunk that performs the actual deletion of files.
func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
if lenMatches != 0 && lenMatches != lenCandidates {
if err := doRemoveFiles(); err != nil {
return err
}
b.Log(LogLevelInfo, context,
"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
lenMatches,
lenCandidates,
description,
b.RetentionDays,
)
} else if lenMatches != 0 && lenMatches == lenCandidates {
b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing %s.", lenMatches, description)
b.Log(LogLevelWarning, context, "Refusing to do so, please check your configuration.")
} else {
b.Log(LogLevelInfo, context, "None of %d existing %s were pruned.", lenCandidates, description)
}
return nil
}

View File

@@ -1,123 +0,0 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package webdav
import (
"errors"
"fmt"
"io/fs"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/offen/docker-volume-backup/internal/storage"
"github.com/studio-b12/gowebdav"
)
type webDavStorage struct {
*storage.StorageBackend
client *gowebdav.Client
url string
}
// Config allows configuring a WebDAV storage backend.
type Config struct {
URL string
RemotePath string
Username string
Password string
URLInsecure bool
}
// NewStorageBackend creates and initializes a new WebDav storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
if opts.Username == "" || opts.Password == "" {
return nil, errors.New("NewStorageBackend: WEBDAV_URL is defined, but no credentials were provided")
} else {
webdavClient := gowebdav.NewClient(opts.URL, opts.Username, opts.Password)
if opts.URLInsecure {
defaultTransport, ok := http.DefaultTransport.(*http.Transport)
if !ok {
return nil, errors.New("NewStorageBackend: unexpected error when asserting type for http.DefaultTransport")
}
webdavTransport := defaultTransport.Clone()
webdavTransport.TLSClientConfig.InsecureSkipVerify = opts.URLInsecure
webdavClient.SetTransport(webdavTransport)
}
return &webDavStorage{
StorageBackend: &storage.StorageBackend{
DestinationPath: opts.RemotePath,
Log: logFunc,
},
client: webdavClient,
}, nil
}
}
// Name returns the name of the storage backend
func (b *webDavStorage) Name() string {
return "WebDAV"
}
// Copy copies the given file to the WebDav storage backend.
func (b *webDavStorage) Copy(file string) error {
_, name := path.Split(file)
if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error creating directory '%s' on WebDAV server: %w", b.DestinationPath, err)
}
r, err := os.Open(file)
if err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error opening the file to be uploaded: %w", err)
}
if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
return fmt.Errorf("(*webDavStorage).Copy: Error uploading the file to WebDAV server: %w", err)
}
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to WebDAV URL '%s' at path '%s'.", file, b.url, b.DestinationPath)
return nil
}
// Prune rotates away backups according to the configuration and provided deadline for the WebDav storage backend.
func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
candidates, err := b.client.ReadDir(b.DestinationPath)
if err != nil {
return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
}
var matches []fs.FileInfo
var lenCandidates int
for _, candidate := range candidates {
if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
continue
}
lenCandidates++
if candidate.ModTime().Before(deadline) {
matches = append(matches, candidate)
}
}
stats := &storage.PruneStats{
Total: uint(lenCandidates),
Pruned: uint(len(matches)),
}
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error {
for _, match := range matches {
if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
return fmt.Errorf("(*webDavStorage).Prune: Error removing file from WebDAV storage: %w", err)
}
}
return nil
}); err != nil {
return stats, err
}
return stats, nil
}

View File

@@ -1,58 +0,0 @@
version: '3'
services:
storage:
image: mcr.microsoft.com/azure-storage/azurite
volumes:
- azurite_backup_data:/data
command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data
healthcheck:
test: nc 127.0.0.1 10000 -z
interval: 1s
retries: 30
az_cli:
image: mcr.microsoft.com/azure-cli
volumes:
- ./local:/dump
command:
- /bin/sh
- -c
- |
az storage container create --name test-container
depends_on:
storage:
condition: service_healthy
environment:
AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://storage:10000/devstoreaccount1;
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
restart: always
environment:
AZURE_STORAGE_ACCOUNT_NAME: devstoreaccount1
AZURE_STORAGE_PRIMARY_ACCOUNT_KEY: Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==
AZURE_STORAGE_CONTAINER_NAME: test-container
AZURE_STORAGE_ENDPOINT: http://storage:10000/{{ .AccountName }}/
AZURE_STORAGE_PATH: 'path/to/backup'
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
azurite_backup_data:
name: azurite_backup_data
app_data:

View File

@@ -1,40 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker compose run --rm az_cli \
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
pass "Found relevant files in untared remote backups."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker compose run --rm az_cli \
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
test -f ./local/test.tar.gz
pass "Remote backups have not been deleted."
docker compose down --volumes

View File

@@ -1,48 +0,0 @@
version: '3'
services:
minio:
hostname: minio.local
image: minio/minio:RELEASE.2020-08-04T23-10-51Z
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: test
MINIO_ACCESS_KEY: test
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server --certs-dir "/certs" --address ":443" /data'
volumes:
- minio_backup_data:/data
- ./minio.crt:/certs/public.crt
- ./minio.key:/certs/private.key
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
depends_on:
- minio
restart: always
environment:
BACKUP_FILENAME: test.tar.gz
AWS_ACCESS_KEY_ID: test
AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
AWS_ENDPOINT: minio.local:443
AWS_ENDPOINT_CA_CERT: /root/minio-rootCA.crt
AWS_S3_BUCKET_NAME: backup
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ./rootCA.crt:/root/minio-rootCA.crt
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
minio_backup_data:
name: minio_backup_data
app_data:

View File

@@ -1,43 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
openssl genrsa -des3 -passout pass:test -out rootCA.key 4096
openssl req -passin pass:test \
-subj "/C=DE/ST=BE/O=IntegrationTest, Inc." \
-x509 -new -key rootCA.key -sha256 -days 1 -out rootCA.crt
openssl genrsa -out minio.key 4096
openssl req -new -sha256 -key minio.key \
-subj "/C=DE/ST=BE/O=IntegrationTest, Inc./CN=minio" \
-out minio.csr
openssl x509 -req -passin pass:test \
-in minio.csr \
-CA rootCA.crt -CAkey rootCA.key -CAcreateserial \
-extfile san.cnf \
-out minio.crt -days 1 -sha256
openssl x509 -in minio.crt -noout -text
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'tar -xvf /minio_data/backup/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backups."
docker compose down --volumes

View File

@@ -1 +0,0 @@
subjectAltName = DNS:minio.local

View File

@@ -3,15 +3,10 @@
 set -e
 cd $(dirname $0)
-. ../util.sh
-current_test=$(basename $(pwd))
 docker network create test_network
 docker volume create backup_data
 docker volume create app_data
-# This volume is created to test whether empty directories are handled
-# correctly. It is not supposed to hold any data.
-docker volume create empty_data
 docker run -d \
   --name minio \
@@ -28,15 +23,16 @@ docker exec minio mkdir -p /data/backup
 docker run -d \
   --name offen \
   --network test_network \
+  --label "docker-volume-backup.stop-during-backup=true" \
   -v app_data:/var/opt/offen/ \
   offen/offen:latest
 sleep 10
-docker run --rm \
+docker run -d \
+  --name backup \
   --network test_network \
   -v app_data:/backup/app_data \
-  -v empty_data:/backup/empty_data \
   -v /var/run/docker.sock:/var/run/docker.sock \
   --env AWS_ACCESS_KEY_ID=test \
   --env AWS_SECRET_ACCESS_KEY=GMusLtUmILge2by+z890kQ \
@@ -44,20 +40,25 @@ docker run --rm \
   --env AWS_ENDPOINT_PROTO=http \
   --env AWS_S3_BUCKET_NAME=backup \
   --env BACKUP_FILENAME=test.tar.gz \
-  --env "BACKUP_FROM_SNAPSHOT=true" \
-  --entrypoint backup \
-  offen/docker-volume-backup:${TEST_VERSION:-canary}
-docker run --rm \
+  --env BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?" \
+  offen/docker-volume-backup:$TEST_VERSION
+docker exec backup backup
+docker run --rm -it \
   -v backup_data:/data alpine \
-  ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'
-pass "Found relevant files in untared remote backup."
-# This test does not stop containers during backup. This is happening on
-# purpose in order to cover this setup as well.
-expect_running_containers "2"
-docker rm $(docker stop minio offen)
+  ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
+echo "[TEST:PASS] Found relevant files in untared backup."
+if [ "$(docker ps -q | wc -l)" != "3" ]; then
+  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
+  docker ps
+  exit 1
+fi
+echo "[TEST:PASS] All containers running post backup."
+docker rm $(docker stop minio offen backup)
 docker volume rm backup_data app_data
 docker network rm test_network

View File

@@ -1,50 +0,0 @@
version: '3.8'
services:
database:
image: mariadb:10.7
deploy:
restart_policy:
condition: on-failure
environment:
MARIADB_ROOT_PASSWORD: test
MARIADB_DATABASE: backup
labels:
# this is testing the deprecated label on purpose
- docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump -ptest --all-databases > /tmp/volume/dump.sql'
- docker-volume-backup.copy-post=/bin/sh -c 'echo "post" > /tmp/volume/post.txt'
- docker-volume-backup.exec-label=test
volumes:
- app_data:/tmp/volume
other_database:
image: mariadb:10.7
deploy:
restart_policy:
condition: on-failure
environment:
MARIADB_ROOT_PASSWORD: test
MARIADB_DATABASE: backup
labels:
- docker-volume-backup.archive-pre=touch /tmp/volume/not-relevant.txt
- docker-volume-backup.exec-label=not-relevant
volumes:
- app_data:/tmp/volume
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
deploy:
restart_policy:
condition: on-failure
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
EXEC_LABEL: test
EXEC_FORWARD_OUTPUT: "true"
volumes:
- ./local:/archive
- app_data:/backup/data:ro
- /var/run/docker.sock:/var/run/docker.sock
volumes:
app_data:

View File

@@ -1,64 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p ./local
docker compose up -d
sleep 30 # mariadb likes to take a bit before responding
docker compose exec backup backup
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/dump.sql ]; then
fail "Could not find file written by pre command."
fi
pass "Found expected file."
if [ -f ./backup/data/not-relevant.txt ]; then
fail "Command ran for container with other label."
fi
pass "Command did not run for container with other label."
if [ -f ./backup/data/post.txt ]; then
fail "File created in post command was present in backup."
fi
pass "Did not find unexpected file."
docker compose down --volumes
sudo rm -rf ./local
info "Running commands test in swarm mode next."
mkdir -p ./local
docker swarm init
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z $(docker ps -q -f name=backup) ]; do
info "Backup container not ready yet. Retrying."
sleep 1
done
sleep 20
docker exec $(docker ps -q -f name=backup) backup
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/dump.sql ]; then
fail "Could not find file written by pre command."
fi
pass "Found expected file."
if [ -f ./backup/data/post.txt ]; then
fail "File created in post command was present in backup."
fi
pass "Did not find unexpected file."
docker stack rm test_stack
docker swarm leave --force

View File

@@ -10,11 +10,10 @@ services:
       MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
     entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
     volumes:
-      - minio_backup_data:/data
-  backup:
-    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
-    hostname: hostnametoken
+      - backup_data:/data
+  backup: &default_backup_service
+    image: offen/docker-volume-backup:${TEST_VERSION}
     depends_on:
       - minio
     restart: always
@@ -24,13 +23,13 @@ services:
       AWS_ENDPOINT: minio:9000
       AWS_ENDPOINT_PROTO: http
       AWS_S3_BUCKET_NAME: backup
-      BACKUP_FILENAME_EXPAND: 'true'
-      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
+      BACKUP_FILENAME: test.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
       BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
     volumes:
-      - ./local:/archive
       - app_data:/backup/app_data:ro
       - /var/run/docker.sock:/var/run/docker.sock
@@ -41,7 +40,7 @@ services:
     volumes:
       - app_data:/var/opt/offen
 volumes:
-  minio_backup_data:
-    name: minio_backup_data
+  backup_data:
   app_data:

55
test/compose/run.sh Executable file
View File

@@ -0,0 +1,55 @@
#!/bin/sh
set -e
cd $(dirname $0)
mkdir -p local
docker-compose up -d
sleep 5
docker-compose exec backup backup
docker run --rm -it \
-v compose_backup_data:/data alpine \
ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
echo "[TEST:PASS] Found relevant files in untared remote backup."
tar -xf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
echo "[TEST:PASS] Found relevant files in untared local backup."
if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
docker-compose ps
exit 1
fi
echo "[TEST:PASS] All containers running post backup."
docker-compose down
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5
docker-compose exec backup backup
docker run --rm -it \
-v compose_backup_data:/data alpine \
ash -c '[ $(find /data/backup/ -type f | wc -l) = "1" ]'
echo "[TEST:PASS] Remote backups have not been deleted."
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
echo "[TEST:FAIL] Backups should not have been deleted, instead seen:"
find ./local -type f
fi
echo "[TEST:PASS] Local backups have not been deleted."
docker-compose down --volumes

View File

@@ -1 +0,0 @@
local

View File

@@ -1,2 +0,0 @@
BACKUP_FILENAME="conf.tar.gz"
BACKUP_CRON_EXPRESSION="*/1 * * * *"

View File

@@ -1,2 +0,0 @@
BACKUP_FILENAME="other.tar.gz"
BACKUP_CRON_EXPRESSION="*/1 * * * *"

View File

@@ -1,2 +0,0 @@
BACKUP_FILENAME="never.tar.gz"
BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?"

View File

@@ -1,23 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- ./01backup.env:/etc/dockervolumebackup/conf.d/01backup.env
- ./02backup.env:/etc/dockervolumebackup/conf.d/02backup.env
- ./03never.env:/etc/dockervolumebackup/conf.d/03never.env
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:

View File

@@ -1,31 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
# sleep until a backup is guaranteed to have happened on the 1 minute schedule
sleep 100
docker compose down --volumes
if [ ! -f ./local/conf.tar.gz ]; then
fail "Config from file was not used."
fi
pass "Config from file was used."
if [ ! -f ./local/other.tar.gz ]; then
fail "Run on same schedule did not succeed."
fi
pass "Run on same schedule succeeded."
if [ -f ./local/never.tar.gz ]; then
fail "Unexpected file was found."
fi
pass "Unexpected cron did not run."

View File

@@ -1,4 +0,0 @@
ARG version=canary
FROM offen/docker-volume-backup:$version
RUN apk add rsync

View File

@@ -1,26 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
labels:
- docker-volume-backup.copy-post=/bin/sh -c 'mkdir -p /tmp/unpack && tar -xvf $$COMMAND_RUNTIME_ARCHIVE_FILEPATH -C /tmp/unpack && rsync -r /tmp/unpack/backup/app_data /local'
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
EXEC_FORWARD_OUTPUT: "true"
volumes:
- ./local:/local
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:

View File

@@ -1,29 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
export BASE_VERSION="${TEST_VERSION:-canary}"
export TEST_VERSION="${TEST_VERSION:-canary}-with-rsync"
docker build . -t offen/docker-volume-backup:$TEST_VERSION --build-arg version=$BASE_VERSION
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "2"
if [ ! -f "./local/app_data/offen.db" ]; then
fail "Could not find expected file in untared archive."
fi
docker compose down --volumes

1
test/gpg/.gitignore vendored
View File

@@ -1 +0,0 @@
local

View File

@@ -1,26 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
environment:
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_FILENAME: test.tar.gz
BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
GPG_PASSPHRASE: 1234#$$ecret
volumes:
- ./local:/archive
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:

View File

@@ -1,33 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
docker compose exec backup backup
expect_running_containers "2"
tmp_dir=$(mktemp -d)
echo "1234#\$ecret" | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C $tmp_dir
if [ ! -f $tmp_dir/backup/app_data/offen.db ]; then
fail "Could not find expected file in untared archive."
fi
rm ./local/decrypted.tar.gz
pass "Found relevant files in decrypted and untared local backup."
if [ ! -L ./local/test-latest.tar.gz.gpg ]; then
fail "Could not find local symlink to latest encrypted backup."
fi
docker compose down --volumes

View File

@@ -1 +0,0 @@
local

View File

@@ -1,15 +0,0 @@
version: '3.8'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
deploy:
restart_policy:
condition: on-failure
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_EXCLUDE_REGEXP: '\.(me|you)$$'
volumes:
- ./local:/archive
- ./sources:/backup/data:ro

View File

@@ -1,28 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
docker compose exec backup backup
docker compose down --volumes
out=$(mktemp -d)
sudo tar --same-owner -xvf ./local/test.tar.gz -C "$out"
if [ ! -f "$out/backup/data/me.txt" ]; then
fail "Expected file was not found."
fi
pass "Expected file was found."
if [ -f "$out/backup/data/skip.me" ]; then
fail "Ignored file was found."
fi
pass "Ignored file was not found."

View File

@@ -1 +0,0 @@
local

View File

@@ -1,29 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
restart: always
environment:
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ./local:/archive
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:

View File

@@ -1,55 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
docker compose exec backup backup
sleep 5
expect_running_containers "2"
tmp_dir=$(mktemp -d)
tar -xvf ./local/test-hostnametoken.tar.gz -C $tmp_dir
if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
fail "Could not find expected file in untared archive."
fi
rm -f ./local/test-hostnametoken.tar.gz
if [ ! -L "$tmp_dir/backup/app_data/db.link" ]; then
fail "Could not find expected symlink in untared archive."
fi
pass "Found relevant files in decrypted and untared local backup."
if [ ! -L ./local/test-hostnametoken.latest.tar.gz.gpg ]; then
fail "Could not find symlink to latest version."
fi
pass "Found symlink to latest version in local backup."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
fail "Backups should not have been deleted, instead seen: "$(find ./local -type f)""
fi
pass "Local backups have not been deleted."
docker compose down --volumes

View File

@@ -1 +0,0 @@
local

View File

@@ -1,37 +0,0 @@
version: '3'
services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_PRUNING_PREFIX: test
      NOTIFICATION_LEVEL: info
      NOTIFICATION_URLS: ${NOTIFICATION_URLS}
      EXTRA_VALUE: extra-value
    volumes:
      - ./local:/archive
      - app_data:/backup/app_data:ro
      - ./notifications.tmpl:/etc/dockervolumebackup/notifications.d/notifications.tmpl
  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen
  gotify:
    image: gotify/server
    ports:
      - 8080:80
    environment:
      - GOTIFY_DEFAULTUSER_PASS=custom
    volumes:
      - gotify_data:/app/data
volumes:
  app_data:
  gotify_data:

View File

@@ -1,7 +0,0 @@
{{ define "title_success" -}}
Successful test run with {{ env "EXTRA_VALUE" }}, yay!
{{- end }}
{{ define "body_success" -}}
Backing up {{ .Stats.BackupFile.FullPath }} succeeded.
{{- end }}

View File

@@ -1,50 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')
info "Set up Gotify application using token $GOTIFY_TOKEN"
docker compose exec backup backup
NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 0 ]; then
fail "Expected no notifications to be sent when not configured"
fi
pass "No notifications were sent when not configured."
docker compose down
NOTIFICATION_URLS="gotify://gotify/${GOTIFY_TOKEN}?disableTLS=true" docker compose up -d
docker compose exec backup backup
NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 1 ]; then
fail "Expected one notifications to be sent when configured"
fi
pass "Correct number of notifications were sent when configured."
MESSAGE_TITLE=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].title')
MESSAGE_BODY=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].message')
if [ "$MESSAGE_TITLE" != "Successful test run with extra-value, yay!" ]; then
fail "Unexpected notification title $MESSAGE_TITLE"
fi
pass "Custom notification title was used."
if [ "$MESSAGE_BODY" != "Backing up /tmp/test.tar.gz succeeded." ]; then
fail "Unexpected notification body $MESSAGE_BODY"
fi
pass "Custom notification body was used."
docker compose down --volumes

View File

@@ -1 +0,0 @@
local

View File

@@ -1,27 +0,0 @@
version: '3'
services:
  db:
    image: postgres:14-alpine
    restart: unless-stopped
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - postgres_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=1FHJMSwt0yhIN1zS7I4DilGUhThBKq0x
      - POSTGRES_USER=test
      - POSTGRES_DB=test
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION}
    restart: always
    environment:
      BACKUP_FILENAME: backup.tar.gz
    volumes:
      - postgres_data:/backup/postgres:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./local:/archive
volumes:
  postgres_data:

View File

@@ -1,30 +0,0 @@
#!/bin/sh
# This test refers to https://github.com/offen/docker-volume-backup/issues/71
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
mkdir -p local
docker compose up -d
sleep 5
docker compose exec backup backup
tmp_dir=$(mktemp -d)
sudo tar --same-owner -xvf ./local/backup.tar.gz -C $tmp_dir
sudo find $tmp_dir/backup/postgres > /dev/null
pass "Backup contains files at expected location"
for file in $(sudo find $tmp_dir/backup/postgres); do
  if [ "$(sudo stat -c '%u:%g' $file)" != "70:70" ]; then
    fail "Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
  fi
done
pass "All files and directories in backup preserved their ownership."
docker compose down --volumes

View File

@@ -1,42 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 5
# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm \
  -v minio_backup_data:/minio_data \
  alpine \
  ash -c 'tar -xvf /minio_data/backup/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backups."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm \
  -v minio_backup_data:/minio_data \
  alpine \
  ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes

View File

@@ -1,78 +0,0 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense
version: '3.8'
services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      MINIO_ROOT_USER: test
      MINIO_ROOT_PASSWORD: test
      MINIO_ACCESS_KEY: test
      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
    volumes:
      - backup_data:/data
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    depends_on:
      - minio
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      AWS_ACCESS_KEY_ID_FILE: /run/secrets/minio_root_user
      AWS_SECRET_ACCESS_KEY_FILE: /run/secrets/minio_root_password
      AWS_ENDPOINT: minio:9000
      AWS_ENDPOINT_PROTO: http
      AWS_S3_BUCKET_NAME: backup
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: 7
      BACKUP_PRUNING_LEEWAY: 5s
    volumes:
      - pg_data:/backup/pg_data:ro
      - /var/run/docker.sock:/var/run/docker.sock
    secrets:
      - minio_root_user
      - minio_root_password
  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    healthcheck:
      disable: true
    deploy:
      replicas: 2
      restart_policy:
        condition: on-failure
  pg:
    image: postgres:14-alpine
    environment:
      POSTGRES_PASSWORD: example
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - pg_data:/var/lib/postgresql/data
    deploy:
      restart_policy:
        condition: on-failure
volumes:
  backup_data:
    name: backup_data
  pg_data:
    name: pg_data
secrets:
  minio_root_user:
    external: true
  minio_root_password:
    external: true

View File

@@ -1,44 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker swarm init
printf "test" | docker secret create minio_root_user -
printf "GMusLtUmILge2by+z890kQ" | docker secret create minio_root_password -
docker stack deploy --compose-file=docker-compose.yml test_stack
while [ -z $(docker ps -q -f name=backup) ]; do
  info "Backup container not ready yet. Retrying."
  sleep 1
done
sleep 20
docker exec $(docker ps -q -f name=backup) backup
docker run --rm \
  -v backup_data:/data alpine \
  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
pass "Found relevant files in untared backup."
sleep 5
expect_running_containers "5"
docker stack rm test_stack
docker secret rm minio_root_password
docker secret rm minio_root_user
docker swarm leave --force
sleep 10
docker volume rm backup_data
docker volume rm pg_data

View File

@@ -1,47 +0,0 @@
version: '3'
services:
  ssh:
    image: linuxserver/openssh-server:version-8.6_p1-r3
    environment:
      - PUID=1000
      - PGID=1000
      - USER_NAME=test
    volumes:
      - ./id_rsa.pub:/config/.ssh/authorized_keys
      - ssh_backup_data:/tmp
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    hostname: hostnametoken
    depends_on:
      - ssh
    restart: always
    environment:
      BACKUP_FILENAME_EXPAND: 'true'
      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      SSH_HOST_NAME: ssh
      SSH_PORT: 2222
      SSH_USER: test
      SSH_REMOTE_PATH: /tmp
      SSH_IDENTITY_PASSPHRASE: test1234
    volumes:
      - ./id_rsa:/root/.ssh/id_rsa
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock
  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen
volumes:
  ssh_backup_data:
    name: ssh_backup_data
  app_data:

View File

@@ -1,43 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers 3
docker run --rm \
  -v ssh_backup_data:/ssh_data \
  alpine \
  ash -c 'tar -xvf /ssh_data/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in decrypted and untared remote backups."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm \
  -v ssh_backup_data:/ssh_data \
  alpine \
  ash -c '[ $(find /ssh_data/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes
rm -f id_rsa id_rsa.pub

View File

@@ -18,8 +18,8 @@ services:
     volumes:
       - backup_data:/data
-  backup:
-    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+  backup: &default_backup_service
+    image: offen/docker-volume-backup:${TEST_VERSION}
     depends_on:
       - minio
     deploy:
@@ -43,15 +43,13 @@ services:
     image: offen/offen:latest
     labels:
       - docker-volume-backup.stop-during-backup=true
-    healthcheck:
-      disable: true
     deploy:
       replicas: 2
       restart_policy:
         condition: on-failure
   pg:
-    image: postgres:14-alpine
+    image: postgres:12.2-alpine
     environment:
       POSTGRES_PASSWORD: example
     labels:
@@ -64,6 +62,4 @@ services:
 volumes:
   backup_data:
-    name: backup_data
   pg_data:
-    name: pg_data
View File

@@ -3,15 +3,13 @@
 set -e
 cd $(dirname $0)
-. ../util.sh
-current_test=$(basename $(pwd))
 docker swarm init
 docker stack deploy --compose-file=docker-compose.yml test_stack
 while [ -z $(docker ps -q -f name=backup) ]; do
-  info "Backup container not ready yet. Retrying."
+  echo "[TEST:INFO] Backup container not ready yet. Retrying."
   sleep 1
 done
@@ -19,19 +17,20 @@ sleep 20
 docker exec $(docker ps -q -f name=backup) backup
-docker run --rm \
-  -v backup_data:/data alpine \
+docker run --rm -it \
+  -v test_stack_backup_data:/data alpine \
   ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
-pass "Found relevant files in untared backup."
-sleep 5
-expect_running_containers "5"
+echo "[TEST:PASS] Found relevant files in untared backup."
+if [ "$(docker ps -q | wc -l)" != "5" ]; then
+  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
+  docker ps -a
+  exit 1
+fi
+echo "[TEST:PASS] All containers running post backup."
 docker stack rm test_stack
 docker swarm leave --force
-sleep 10
-docker volume rm backup_data
-docker volume rm pg_data

View File

@@ -1,2 +0,0 @@
local
backup

View File

@@ -1,30 +0,0 @@
version: '2.4'
services:
  alpine:
    image: alpine:3.17.3
    tty: true
    volumes:
      - app_data:/tmp
    labels:
      - docker-volume-backup.archive-pre.user=testuser
      - docker-volume-backup.archive-pre=/bin/sh -c 'whoami > /tmp/whoami.txt'
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      EXEC_FORWARD_OUTPUT: "true"
    volumes:
      - ./local:/archive
      - app_data:/backup/data:ro
      - /var/run/docker.sock:/var/run/docker.sock
volumes:
  app_data:
  archive:

View File

@@ -1,30 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
user_name=testuser
docker exec user-alpine-1 adduser --disabled-password "$user_name"
docker compose exec backup backup
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/whoami.txt ]; then
  fail "Could not find file written by pre command."
fi
pass "Found expected file."
tar -xvf ./local/test.tar.gz
if [ "$(cat ./backup/data/whoami.txt)" != "$user_name" ]; then
  fail "Could not find expected user name."
fi
pass "Found expected user."
docker compose down --volumes
sudo rm -rf ./local

View File

@@ -1,23 +0,0 @@
#!/bin/sh
set -e
info () {
  echo "[test:${current_test:-none}:info] "$1""
}
pass () {
  echo "[test:${current_test:-none}:pass] "$1""
}
fail () {
  echo "[test:${current_test:-none}:fail] "$1""
  exit 1
}
expect_running_containers () {
  if [ "$(docker ps -q | wc -l)" != "$1" ]; then
    fail "Expected $1 containers to be running, instead seen: "$(docker ps -a | wc -l)""
  fi
  pass "$1 containers running."
}

View File

@@ -1,45 +0,0 @@
version: '3'
services:
  webdav:
    image: bytemark/webdav:2.4
    environment:
      AUTH_TYPE: Digest
      USERNAME: test
      PASSWORD: test
    volumes:
      - webdav_backup_data:/var/lib/dav
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    hostname: hostnametoken
    depends_on:
      - webdav
    restart: always
    environment:
      BACKUP_FILENAME_EXPAND: 'true'
      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      WEBDAV_URL: http://webdav/
      WEBDAV_URL_INSECURE: 'true'
      WEBDAV_PATH: /my/new/path/
      WEBDAV_USERNAME: test
      WEBDAV_PASSWORD: test
    volumes:
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock
  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen
volumes:
  webdav_backup_data:
    name: webdav_backup_data
  app_data:

View File

@@ -1,40 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
docker compose up -d
sleep 5
docker compose exec backup backup
sleep 5
expect_running_containers "3"
docker run --rm \
  -v webdav_backup_data:/webdav_data \
  alpine \
  ash -c 'tar -xvf /webdav_data/data/my/new/path/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
pass "Found relevant files in untared remote backup."
# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5
docker compose exec backup backup
docker run --rm \
  -v webdav_backup_data:/webdav_data \
  alpine \
  ash -c '[ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'
pass "Remote backups have not been deleted."
docker compose down --volumes