Mirror of https://github.com/offen/docker-volume-backup.git (synced 2025-12-05 17:18:02 +01:00)

Compare commits: v2.0.0-rc. ... v2.20.0 (107 commits)
Commits in this range:

```
b441cf3e2b 82f66565da d68814be9d 3661a4b49b e738bd0539 342ae5910e c2a8cc92fc 1892d56ff6 0b205fe6dc 8c8a2fa088
a850bf13fe b52b271bac cac5777e79 94a1edc4ad a654097e59 1b1fc4856c e81c34b8fc 9c23767fce 51af8c3c77 1ea0b51b23
da8c63f755 9bc8db0f7c 508bc07b4f b8f71b04a1 5f3832d621 4b1127b8c4 ae50a3ac4f bad22eee93 c9ebb9e14e 6e1b8553e6
5ec2b2c3ff 3bbeba5b83 9155b4d130 2a17e84ab6 00f2359461 0504a92a1f 3ded77448c 58b42b9036 180438f1fc 30265c14ba
a57e93d01e 3e17d1b123 0e248010a8 e6af6efd8a 34d04211eb 8dfdd14527 3bb99a7117 ddc34be55d cb9b4bfcff 62bd2f4a5a
6fe629ce87 1db896f7cf 6ded00aa06 6b79f1914b 40ff2e00c9 760cc9cebc 1f9582df51 32575c831e c062710ce8 3a7dfe8e60
9ec33510e7 4207146fb6 1f727f698f 88c90a206c 8bad0656b3 08d78a0bd6 5a6ce81b58 dfd0d617e4 7bc5b2ccef b6ad624115
210c7d4540 3c06bf8102 411c39ee72 0c666d0c88 a0402b407d 3193e88fc0 c391230be6 f946f36fb0 5245b5882f 7f0f173115
ad7ec58322 b7ab2fbacc 789fc656e8 c59b40f2df cff418e735 d7ccdd79fc bd73a2b5e4 6cf5cf47e7 53c257065e 184b7a1e18
69a94f226b 160a47e90b 59660ec5c7 af3e69b7a8 5d400cb943 88368197c1 e46968ed79 2c06f81503 55d030a06a fefc34c6aa
5922820ada 8aba98c012 70daa0308a ede94bcd88 aae97a5617 825cbb50ef bea203af3d
```
.circleci/config.yml

```diff
@@ -3,8 +3,9 @@ version: 2.1
 jobs:
   canary:
     machine:
-      image: ubuntu-1604:202007-01
+      image: ubuntu-2004:202201-02
     working_directory: ~/docker-volume-backup
+    resource_class: large
     steps:
       - checkout
       - run:
@@ -19,6 +20,7 @@ jobs:
           name: Run tests
           working_directory: ~/docker-volume-backup/test
           command: |
+            export GPG_TTY=$(tty)
             ./test.sh canary

   build:
@@ -28,6 +30,7 @@ jobs:
       DOCKER_BUILDKIT: '1'
       DOCKER_CLI_EXPERIMENTAL: enabled
     working_directory: ~/docker-volume-backup
+    resource_class: large
     steps:
       - checkout
       - setup_remote_docker:
@@ -47,6 +50,7 @@ jobs:
           if [[ "$CIRCLE_TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
             # prerelease tags like `v2.0.0-alpha.1` should not be released as `latest`
             tag_args="$tag_args -t offen/docker-volume-backup:latest"
+            tag_args="$tag_args -t offen/docker-volume-backup:$(echo "$CIRCLE_TAG" | cut -d. -f1)"
           fi
           docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
             $tag_args . --push
```
.github/ISSUE_TEMPLATE.md (vendored, new file, +20 lines)

```markdown
* **I'm submitting a ...**
  - [ ] bug report
  - [ ] feature request
  - [ ] support request

* **What is the current behavior?**

* **If the current behavior is a bug, please provide the configuration and steps to reproduce and if possible a minimal demo of the problem.**

* **What is the expected behavior?**

* **What is the motivation / use case for changing the behavior?**

* **Please tell us about your environment:**

  - Image version:
  - Docker version:
  - docker-compose version:

* **Other information** (e.g. detailed explanation, stacktraces, related issues, suggestions how to fix, links for us to have context, eg. stackoverflow, etc)
```
Dockerfile (14 lines changed)

```diff
@@ -1,20 +1,22 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0

-FROM golang:1.17-alpine as builder
+FROM golang:1.18-alpine as builder

 WORKDIR /app
 COPY go.mod go.sum ./
-COPY cmd/backup/main.go ./cmd/backup/main.go
-RUN go build -o backup cmd/backup/main.go
+RUN go mod download
+COPY cmd/backup ./cmd/backup/
+WORKDIR /app/cmd/backup
+RUN go build -o backup .

-FROM alpine:3.14
+FROM alpine:3.15

 WORKDIR /root

-RUN apk add --update ca-certificates
+RUN apk add --no-cache ca-certificates

-COPY --from=builder /app/backup /usr/bin/backup
+COPY --from=builder /app/cmd/backup/backup /usr/bin/backup

 COPY ./entrypoint.sh /root/
 RUN chmod +x entrypoint.sh
```
cmd/backup/archive.go (new file, +133 lines)

```go
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

// Portions of this file are taken from package `targz`, Copyright (c) 2014 Fredrik Wallgren
// Licensed under the MIT License: https://github.com/walle/targz/blob/57fe4206da5abf7dd3901b4af3891ec2f08c7b08/LICENSE

package main

import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strings"
)

func createArchive(files []string, inputFilePath, outputFilePath string) error {
	inputFilePath = stripTrailingSlashes(inputFilePath)
	inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
	if err != nil {
		return fmt.Errorf("createArchive: error transposing given file paths: %w", err)
	}
	if err := os.MkdirAll(filepath.Dir(outputFilePath), 0755); err != nil {
		return fmt.Errorf("createArchive: error creating output file path: %w", err)
	}

	if err := compress(files, outputFilePath, filepath.Dir(inputFilePath)); err != nil {
		return fmt.Errorf("createArchive: error creating archive: %w", err)
	}

	return nil
}

func stripTrailingSlashes(path string) string {
	if len(path) > 0 && path[len(path)-1] == '/' {
		path = path[0 : len(path)-1]
	}

	return path
}

func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error) {
	inputFilePath, err := filepath.Abs(inputFilePath)
	if err == nil {
		outputFilePath, err = filepath.Abs(outputFilePath)
	}

	return inputFilePath, outputFilePath, err
}

func compress(paths []string, outFilePath, subPath string) error {
	file, err := os.Create(outFilePath)
	if err != nil {
		return fmt.Errorf("compress: error creating out file: %w", err)
	}

	prefix := path.Dir(outFilePath)
	gzipWriter := gzip.NewWriter(file)
	tarWriter := tar.NewWriter(gzipWriter)

	for _, p := range paths {
		if err := writeTarGz(p, tarWriter, prefix); err != nil {
			return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
		}
	}

	err = tarWriter.Close()
	if err != nil {
		return fmt.Errorf("compress: error closing tar writer: %w", err)
	}

	err = gzipWriter.Close()
	if err != nil {
		return fmt.Errorf("compress: error closing gzip writer: %w", err)
	}

	err = file.Close()
	if err != nil {
		return fmt.Errorf("compress: error closing file: %w", err)
	}

	return nil
}

func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
	fileInfo, err := os.Lstat(path)
	if err != nil {
		return fmt.Errorf("writeTarGz: error getting file info for %s: %w", path, err)
	}

	if fileInfo.Mode()&os.ModeSocket == os.ModeSocket {
		return nil
	}

	var link string
	if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
		var err error
		if link, err = os.Readlink(path); err != nil {
			return fmt.Errorf("writeTarGz: error resolving symlink %s: %w", path, err)
		}
	}

	header, err := tar.FileInfoHeader(fileInfo, link)
	if err != nil {
		return fmt.Errorf("writeTarGz: error getting file info header: %w", err)
	}
	header.Name = strings.TrimPrefix(path, prefix)

	err = tarWriter.WriteHeader(header)
	if err != nil {
		return fmt.Errorf("writeTarGz: error writing file info header: %w", err)
	}

	if !fileInfo.Mode().IsRegular() {
		return nil
	}

	file, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("writeTarGz: error opening %s: %w", path, err)
	}
	defer file.Close()

	_, err = io.Copy(tarWriter, file)
	if err != nil {
		return fmt.Errorf("writeTarGz: error copying %s to tar writer: %w", path, err)
	}

	return nil
}
```
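As a rough orientation for how these helpers fit together, here is a hedged usage sketch. The paths are invented for illustration, and the fragment reuses `createArchive` from above rather than standing alone:

```go
// Archive two hypothetical files from /backup into /tmp/backup.tar.gz.
// createArchive normalizes both paths to absolute form and creates the
// output directory on demand before delegating to compress.
files := []string{"/backup/data/app.db", "/backup/data/config.json"}
if err := createArchive(files, "/backup", "/tmp/backup.tar.gz"); err != nil {
	log.Fatalf("creating archive: %v", err) // requires a "log" import
}
```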
cmd/backup/config.go (new file, +75 lines)

```go
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"fmt"
	"regexp"
	"time"
)

// Config holds all configuration values that are expected to be set
// by users.
type Config struct {
	BackupSources              string        `split_words:"true" default:"/backup"`
	BackupFilename             string        `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
	BackupFilenameExpand       bool          `split_words:"true"`
	BackupLatestSymlink        string        `split_words:"true"`
	BackupArchive              string        `split_words:"true" default:"/archive"`
	BackupRetentionDays        int32         `split_words:"true" default:"-1"`
	BackupPruningLeeway        time.Duration `split_words:"true" default:"1m"`
	BackupPruningPrefix        string        `split_words:"true"`
	BackupStopContainerLabel   string        `split_words:"true" default:"true"`
	BackupFromSnapshot         bool          `split_words:"true"`
	BackupExcludeRegexp        RegexpDecoder `split_words:"true"`
	AwsS3BucketName            string        `split_words:"true"`
	AwsS3Path                  string        `split_words:"true"`
	AwsEndpoint                string        `split_words:"true" default:"s3.amazonaws.com"`
	AwsEndpointProto           string        `split_words:"true" default:"https"`
	AwsEndpointInsecure        bool          `split_words:"true"`
	AwsStorageClass            string        `split_words:"true"`
	AwsAccessKeyID             string        `envconfig:"AWS_ACCESS_KEY_ID"`
	AwsSecretAccessKey         string        `split_words:"true"`
	AwsIamRoleEndpoint         string        `split_words:"true"`
	GpgPassphrase              string        `split_words:"true"`
	NotificationURLs           []string      `envconfig:"NOTIFICATION_URLS"`
	NotificationLevel          string        `split_words:"true" default:"error"`
	EmailNotificationRecipient string        `split_words:"true"`
	EmailNotificationSender    string        `split_words:"true" default:"noreply@nohost"`
	EmailSMTPHost              string        `envconfig:"EMAIL_SMTP_HOST"`
	EmailSMTPPort              int           `envconfig:"EMAIL_SMTP_PORT" default:"587"`
	EmailSMTPUsername          string        `envconfig:"EMAIL_SMTP_USERNAME"`
	EmailSMTPPassword          string        `envconfig:"EMAIL_SMTP_PASSWORD"`
	WebdavUrl                  string        `split_words:"true"`
	WebdavUrlInsecure          bool          `split_words:"true"`
	WebdavPath                 string        `split_words:"true" default:"/"`
	WebdavUsername             string        `split_words:"true"`
	WebdavPassword             string        `split_words:"true"`
	SSHHostName                string        `split_words:"true"`
	SSHPort                    string        `split_words:"true" default:"22"`
	SSHUser                    string        `split_words:"true"`
	SSHPassword                string        `split_words:"true"`
	SSHIdentityFile            string        `split_words:"true" default:"/root/.ssh/id_rsa"`
	SSHIdentityPassphrase      string        `split_words:"true"`
	SSHRemotePath              string        `split_words:"true"`
	ExecLabel                  string        `split_words:"true"`
	ExecForwardOutput          bool          `split_words:"true"`
	LockTimeout                time.Duration `split_words:"true" default:"60m"`
}

type RegexpDecoder struct {
	Re *regexp.Regexp
}

func (r *RegexpDecoder) Decode(v string) error {
	if v == "" {
		return nil
	}
	re, err := regexp.Compile(v)
	if err != nil {
		return fmt.Errorf("config: error compiling given regexp `%s`: %w", v, err)
	}
	*r = RegexpDecoder{Re: re}
	return nil
}
```
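The `split_words` tag makes `envconfig` derive the variable name from the field name, so `BackupSources` is read from `BACKUP_SOURCES` and so on, while explicit `envconfig:` tags override the derived name. A small sketch of that mapping (the values are made up):

```go
// Sketch: how the struct above is populated from the environment.
os.Setenv("BACKUP_SOURCES", "/backup/my-volume") // -> c.BackupSources via split_words
os.Setenv("BACKUP_RETENTION_DAYS", "7")          // -> c.BackupRetentionDays, parsed as int32
os.Setenv("AWS_ACCESS_KEY_ID", "AKIA...")        // -> c.AwsAccessKeyID via explicit envconfig tag

c := &Config{}
if err := envconfig.Process("", c); err != nil {
	panic(err)
}
fmt.Println(c.BackupSources, c.BackupRetentionDays) // /backup/my-volume 7
```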
cmd/backup/exec.go (new file, +199 lines)

```go
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

// Portions of this file are taken and adapted from `moby`, Copyright 2012-2017 Docker, Inc.
// Licensed under the Apache 2.0 License: https://github.com/moby/moby/blob/8e610b2b55bfd1bfa9436ab110d311f5e8a74dcb/LICENSE

package main

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/cosiner/argv"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/pkg/stdcopy"
	"golang.org/x/sync/errgroup"
)

func (s *script) exec(containerRef string, command string) ([]byte, []byte, error) {
	args, _ := argv.Argv(command, nil, nil)
	execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, types.ExecConfig{
		Cmd:          args[0],
		AttachStdin:  true,
		AttachStderr: true,
	})
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error creating container exec: %w", err)
	}

	resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, types.ExecStartCheck{})
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error attaching container exec: %w", err)
	}
	defer resp.Close()

	var outBuf, errBuf bytes.Buffer
	outputDone := make(chan error)

	go func() {
		_, err := stdcopy.StdCopy(&outBuf, &errBuf, resp.Reader)
		outputDone <- err
	}()

	select {
	case err := <-outputDone:
		if err != nil {
			return nil, nil, fmt.Errorf("exec: error demultiplexing output: %w", err)
		}
		break
	}

	stdout, err := ioutil.ReadAll(&outBuf)
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error reading stdout: %w", err)
	}
	stderr, err := ioutil.ReadAll(&errBuf)
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error reading stderr: %w", err)
	}

	res, err := s.cli.ContainerExecInspect(context.Background(), execID.ID)
	if err != nil {
		return nil, nil, fmt.Errorf("exec: error inspecting container exec: %w", err)
	}

	if res.ExitCode > 0 {
		return stdout, stderr, fmt.Errorf("exec: running command exited %d", res.ExitCode)
	}

	return stdout, stderr, nil
}

func (s *script) runLabeledCommands(label string) error {
	f := []filters.KeyValuePair{
		{Key: "label", Value: label},
	}
	if s.c.ExecLabel != "" {
		f = append(f, filters.KeyValuePair{
			Key:   "label",
			Value: fmt.Sprintf("docker-volume-backup.exec-label=%s", s.c.ExecLabel),
		})
	}
	containersWithCommand, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
		Quiet:   true,
		Filters: filters.NewArgs(f...),
	})
	if err != nil {
		return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
	}

	var hasDeprecatedContainers bool
	if label == "docker-volume-backup.archive-pre" {
		f[0] = filters.KeyValuePair{
			Key:   "label",
			Value: "docker-volume-backup.exec-pre",
		}
		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
			Quiet:   true,
			Filters: filters.NewArgs(f...),
		})
		if err != nil {
			return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
		}
		if len(deprecatedContainers) != 0 {
			hasDeprecatedContainers = true
			containersWithCommand = append(containersWithCommand, deprecatedContainers...)
		}
	}

	if label == "docker-volume-backup.archive-post" {
		f[0] = filters.KeyValuePair{
			Key:   "label",
			Value: "docker-volume-backup.exec-post",
		}
		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
			Quiet:   true,
			Filters: filters.NewArgs(f...),
		})
		if err != nil {
			return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
		}
		if len(deprecatedContainers) != 0 {
			hasDeprecatedContainers = true
			containersWithCommand = append(containersWithCommand, deprecatedContainers...)
		}
	}

	if len(containersWithCommand) == 0 {
		return nil
	}

	if hasDeprecatedContainers {
		s.logger.Warn(
			"Using `docker-volume-backup.exec-pre` and `docker-volume-backup.exec-post` labels has been deprecated and will be removed in the next major version.",
		)
		s.logger.Warn(
			"Please use other `-pre` and `-post` labels instead. Refer to the README for an upgrade guide.",
		)
	}

	g := new(errgroup.Group)

	for _, container := range containersWithCommand {
		c := container
		g.Go(func() error {
			cmd, ok := c.Labels[label]
			if !ok && label == "docker-volume-backup.archive-pre" {
				cmd, _ = c.Labels["docker-volume-backup.exec-pre"]
			} else if !ok && label == "docker-volume-backup.archive-post" {
				cmd, _ = c.Labels["docker-volume-backup.exec-post"]
			}

			s.logger.Infof("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/"))
			stdout, stderr, err := s.exec(c.ID, cmd)
			if s.c.ExecForwardOutput {
				os.Stderr.Write(stderr)
				os.Stdout.Write(stdout)
			}
			if err != nil {
				return fmt.Errorf("runLabeledCommands: error executing command: %w", err)
			}
			return nil
		})
	}

	if err := g.Wait(); err != nil {
		return fmt.Errorf("runLabeledCommands: error from errgroup: %w", err)
	}
	return nil
}

type lifecyclePhase string

const (
	lifecyclePhaseArchive lifecyclePhase = "archive"
	lifecyclePhaseProcess lifecyclePhase = "process"
	lifecyclePhaseCopy    lifecyclePhase = "copy"
	lifecyclePhasePrune   lifecyclePhase = "prune"
)

func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func() error {
	if s.cli == nil {
		return cb
	}
	return func() error {
		if err := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
			return fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
		}
		defer func() {
			s.must(s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)))
		}()
		return cb()
	}
}
```
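The four phases map to container labels of the form `docker-volume-backup.<phase>-pre` and `docker-volume-backup.<phase>-post`. A hedged sketch of the wrapping this produces; it reuses the symbols above rather than standing alone, and the mysqldump label value is a made-up example:

```go
// For the archive phase, the wrapped function effectively runs:
//
//   1. runLabeledCommands("docker-volume-backup.archive-pre")
//   2. cb(), e.g. the archive creation itself
//   3. runLabeledCommands("docker-volume-backup.archive-post"), deferred,
//      so it also runs when cb returns an error
//
// A target container would opt in via a label such as (hypothetical):
//   docker-volume-backup.archive-pre: /bin/sh -c 'mysqldump --all-databases > /backup/dump.sql'
run := s.withLabeledCommands(lifecyclePhaseArchive, s.createArchive)
s.must(run())
```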
cmd/backup/hooks.go (new file, +56 lines)

```go
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"fmt"
	"sort"
)

// hook contains a queued action that can be triggered when the script
// reaches a certain point (e.g. unsuccessful backup)
type hook struct {
	level  hookLevel
	action func(err error) error
}

type hookLevel int

const (
	hookLevelPlumbing hookLevel = iota
	hookLevelError
	hookLevelInfo
)

var hookLevels = map[string]hookLevel{
	"info":  hookLevelInfo,
	"error": hookLevelError,
}

// registerHook adds the given action at the given level.
func (s *script) registerHook(level hookLevel, action func(err error) error) {
	s.hooks = append(s.hooks, hook{level, action})
}

// runHooks runs all hooks that have been registered using the
// given levels in the defined ordering. In case executing a hook returns an
// error, the following hooks will still be run before the function returns.
func (s *script) runHooks(err error) error {
	sort.SliceStable(s.hooks, func(i, j int) bool {
		return s.hooks[i].level < s.hooks[j].level
	})
	var actionErrors []error
	for _, hook := range s.hooks {
		if hook.level > s.hookLevel {
			continue
		}
		if actionErr := hook.action(err); actionErr != nil {
			actionErrors = append(actionErrors, fmt.Errorf("runHooks: error running hook: %w", actionErr))
		}
	}
	if len(actionErrors) != 0 {
		return join(actionErrors...)
	}
	return nil
}
```
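The level comparison means a hook only fires when its level does not exceed the script's configured `hookLevel` (derived from `NOTIFICATION_LEVEL` via the `hookLevels` map). A hypothetical illustration, reusing the symbols above:

```go
// With NOTIFICATION_LEVEL=error, s.hookLevel is hookLevelError, so
// plumbing and error hooks run while info hooks are skipped.
s.registerHook(hookLevelInfo, func(err error) error {
	return s.notifySuccess() // skipped: hookLevelInfo > hookLevelError
})
s.registerHook(hookLevelError, func(err error) error {
	if err == nil {
		return nil
	}
	return s.notifyFailure(err) // runs: hookLevelError <= hookLevelError
})
// Plumbing hooks (e.g. stats bookkeeping) always sort first because
// hookLevelPlumbing is the smallest value in the iota block.
```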
cmd/backup/lock.go (new file, +58 lines)

```go
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/gofrs/flock"
)

// lock opens a lockfile at the given location, keeping it locked until the
// caller invokes the returned release func. In case the lock is currently blocked
// by another execution, it will repeatedly retry until the lock is available
// or the given timeout is exceeded.
func (s *script) lock(lockfile string) (func() error, error) {
	start := time.Now()
	defer func() {
		s.stats.LockedTime = time.Now().Sub(start)
	}()

	retry := time.NewTicker(5 * time.Second)
	defer retry.Stop()
	deadline := time.NewTimer(s.c.LockTimeout)
	defer deadline.Stop()

	fileLock := flock.New(lockfile)

	for {
		acquired, err := fileLock.TryLock()
		if err != nil {
			return noop, fmt.Errorf("lock: error trying lock: %w", err)
		}
		if acquired {
			if s.encounteredLock {
				s.logger.Info("Acquired exclusive lock on subsequent attempt, ready to continue.")
			}
			return fileLock.Unlock, nil
		}

		if !s.encounteredLock {
			s.logger.Infof(
				"Exclusive lock was not available on first attempt. Will retry until it becomes available or the timeout of %s is exceeded.",
				s.c.LockTimeout,
			)
			s.encounteredLock = true
		}

		select {
		case <-retry.C:
			continue
		case <-deadline.C:
			return noop, errors.New("lock: timed out waiting for lockfile to become available")
		}
	}
}
```
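Usage mirrors the call site in the new `main.go` further down; a short sketch assuming a `script` value `s`:

```go
// Acquire the lock, waiting up to LOCK_TIMEOUT (default 60m), then
// release it once the run is done. A second container sharing
// /var/lock via a volume blocks here instead of failing outright.
unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
if err != nil {
	panic(err)
}
defer unlock()
```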
cmd/backup/main.go (511 lines reduced to 58)

```diff
@@ -1,511 +1,58 @@
-// Copyright 2021 - Offen Authors <hioffen@posteo.de>
+// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

 import (
-	"context"
-	"fmt"
-	"io"
 	"os"
-	"path"
-	"path/filepath"
-	"time"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/api/types/swarm"
-	"github.com/docker/docker/client"
-	"github.com/gofrs/flock"
-	"github.com/kelseyhightower/envconfig"
-	"github.com/leekchan/timeutil"
-	"github.com/minio/minio-go/v7"
-	"github.com/minio/minio-go/v7/pkg/credentials"
-	"github.com/sirupsen/logrus"
-	"github.com/walle/targz"
-	"golang.org/x/crypto/openpgp"
 )

 func main() {
-	unlock := lock("/var/lock/dockervolumebackup.lock")
-	defer unlock()
-
 	s, err := newScript()
 	if err != nil {
 		panic(err)
 	}

-	s.must(func() error {
+	unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
+	defer unlock()
+	s.must(err)
+
+	defer func() {
+		if pArg := recover(); pArg != nil {
+			if err, ok := pArg.(error); ok {
+				if hookErr := s.runHooks(err); hookErr != nil {
+					s.logger.Errorf("An error occurred calling the registered hooks: %s", hookErr)
+				}
+				os.Exit(1)
+			}
+			panic(pArg)
+		}
+
+		if err := s.runHooks(nil); err != nil {
+			s.logger.Errorf(
+				"Backup procedure ran successfully, but an error occurred calling the registered hooks: %v",
+				err,
+			)
+			os.Exit(1)
+		}
+		s.logger.Info("Finished running backup tasks.")
+	}()
+
+	s.must(s.withLabeledCommands(lifecyclePhaseArchive, func() error {
 		restartContainers, err := s.stopContainers()
+		// The mechanism for restarting containers is not using hooks as it
+		// should happen as soon as possible (i.e. before uploading backups or
+		// similar).
 		defer func() {
 			s.must(restartContainers())
 		}()
 		if err != nil {
 			return err
 		}
-		return s.takeBackup()
-	}())
+		return s.createArchive()
+	})())

-	s.must(s.encryptBackup())
-	s.must(s.copyBackup())
-	s.must(s.removeArtifacts())
-	s.must(s.pruneOldBackups())
-	s.logger.Info("Finished running backup tasks.")
-}
-
-// script holds all the stateful information required to orchestrate a
-// single backup run.
-type script struct {
-	ctx    context.Context
-	cli    *client.Client
-	mc     *minio.Client
-	logger *logrus.Logger
-
-	start time.Time
-	file  string
-
-	c *config
-}
-
-type config struct {
-	BackupSources            string        `split_words:"true" default:"/backup"`
-	BackupFilename           string        `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
-	BackupArchive            string        `split_words:"true" default:"/archive"`
-	BackupRetentionDays      int32         `split_words:"true" default:"-1"`
-	BackupPruningLeeway      time.Duration `split_words:"true" default:"1m"`
-	BackupPruningPrefix      string        `split_words:"true"`
-	BackupStopContainerLabel string        `split_words:"true" default:"true"`
-	AwsS3BucketName          string        `split_words:"true"`
-	AwsEndpoint              string        `split_words:"true" default:"s3.amazonaws.com"`
-	AwsEndpointProto         string        `split_words:"true" default:"https"`
-	AwsEndpointInsecure      bool          `split_words:"true"`
-	AwsAccessKeyID           string        `envconfig:"AWS_ACCESS_KEY_ID"`
-	AwsSecretAccessKey       string        `split_words:"true"`
-	GpgPassphrase            string        `split_words:"true"`
-}
-
-// newScript creates all resources needed for the script to perform actions against
-// remote resources like the Docker engine or remote storage locations. All
-// reading from env vars or other configuration sources is expected to happen
-// in this method.
-func newScript() (*script, error) {
-	s := &script{
-		c:   &config{},
-		ctx: context.Background(),
-		logger: &logrus.Logger{
-			Out:       os.Stdout,
-			Formatter: new(logrus.TextFormatter),
-			Hooks:     make(logrus.LevelHooks),
-			Level:     logrus.InfoLevel,
-		},
-		start: time.Now(),
-	}
-
-	if err := envconfig.Process("", s.c); err != nil {
-		return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
-	}
-
-	s.file = path.Join("/tmp", s.c.BackupFilename)
-
-	_, err := os.Stat("/var/run/docker.sock")
-	if !os.IsNotExist(err) {
-		cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
-		if err != nil {
-			return nil, fmt.Errorf("newScript: failed to create docker client")
-		}
-		s.cli = cli
-	}
-
-	if s.c.AwsS3BucketName != "" {
-		mc, err := minio.New(s.c.AwsEndpoint, &minio.Options{
-			Creds: credentials.NewStaticV4(
-				s.c.AwsAccessKeyID,
-				s.c.AwsSecretAccessKey,
-				"",
-			),
-			Secure: !s.c.AwsEndpointInsecure && s.c.AwsEndpointProto == "https",
-		})
-		if err != nil {
-			return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
-		}
-		s.mc = mc
-	}
-
-	return s, nil
-}
-
-var noop = func() error { return nil }
-
-// stopContainers stops all Docker containers that are marked as to being
-// stopped during the backup and returns a function that can be called to
-// restart everything that has been stopped.
-func (s *script) stopContainers() (func() error, error) {
-	if s.cli == nil {
-		return noop, nil
-	}
-
-	allContainers, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
-		Quiet: true,
-	})
-	if err != nil {
-		return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
-	}
-
-	containerLabel := fmt.Sprintf(
-		"docker-volume-backup.stop-during-backup=%s",
-		s.c.BackupStopContainerLabel,
-	)
-	containersToStop, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
-		Quiet: true,
-		Filters: filters.NewArgs(filters.KeyValuePair{
-			Key:   "label",
-			Value: containerLabel,
-		}),
-	})
-
-	if err != nil {
-		return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
-	}
-
-	if len(containersToStop) == 0 {
-		return noop, nil
-	}
-
-	s.logger.Infof(
-		"Stopping %d container(s) labeled `%s` out of %d running container(s).",
-		len(containersToStop),
-		containerLabel,
-		len(allContainers),
-	)
-
-	var stoppedContainers []types.Container
-	var stopErrors []error
-	for _, container := range containersToStop {
-		if err := s.cli.ContainerStop(s.ctx, container.ID, nil); err != nil {
-			stopErrors = append(stopErrors, err)
-		} else {
-			stoppedContainers = append(stoppedContainers, container)
-		}
-	}
-
-	if len(stopErrors) != 0 {
-		return noop, fmt.Errorf(
-			"stopContainersAndRun: %d error(s) stopping containers: %w",
-			len(stopErrors),
-			err,
-		)
-	}
-
-	return func() error {
-		servicesRequiringUpdate := map[string]struct{}{}
-
-		var restartErrors []error
-		for _, container := range stoppedContainers {
-			if swarmServiceName, ok := container.Labels["com.docker.swarm.service.name"]; ok {
-				servicesRequiringUpdate[swarmServiceName] = struct{}{}
-				continue
-			}
-			if err := s.cli.ContainerStart(s.ctx, container.ID, types.ContainerStartOptions{}); err != nil {
-				restartErrors = append(restartErrors, err)
-			}
-		}
-
-		if len(servicesRequiringUpdate) != 0 {
-			services, _ := s.cli.ServiceList(s.ctx, types.ServiceListOptions{})
-			for serviceName := range servicesRequiringUpdate {
-				var serviceMatch swarm.Service
-				for _, service := range services {
-					if service.Spec.Name == serviceName {
-						serviceMatch = service
-						break
-					}
-				}
-				if serviceMatch.ID == "" {
-					return fmt.Errorf("stopContainersAndRun: couldn't find service with name %s", serviceName)
-				}
-				serviceMatch.Spec.TaskTemplate.ForceUpdate = 1
-				_, err := s.cli.ServiceUpdate(
-					s.ctx, serviceMatch.ID,
-					serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
-				)
-				if err != nil {
-					restartErrors = append(restartErrors, err)
-				}
-			}
-		}
-
-		if len(restartErrors) != 0 {
-			return fmt.Errorf(
-				"stopContainersAndRun: %d error(s) restarting containers and services: %w",
-				len(restartErrors),
-				err,
-			)
-		}
-		s.logger.Infof(
-			"Restarted %d container(s) and the matching service(s).",
-			len(stoppedContainers),
-		)
-		return nil
-	}, nil
-}
-
-// takeBackup creates a tar archive of the configured backup location and
-// saves it to disk.
-func (s *script) takeBackup() error {
-	s.file = timeutil.Strftime(&s.start, s.file)
-	if err := targz.Compress(s.c.BackupSources, s.file); err != nil {
-		return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
-	}
-	s.logger.Infof("Created backup of `%s` at `%s`.", s.c.BackupSources, s.file)
-	return nil
-}
-
-// encryptBackup encrypts the backup file using PGP and the configured passphrase.
-// In case no passphrase is given it returns early, leaving the backup file
-// untouched.
-func (s *script) encryptBackup() error {
-	if s.c.GpgPassphrase == "" {
-		return nil
-	}
-	defer os.Remove(s.file)
-
-	gpgFile := fmt.Sprintf("%s.gpg", s.file)
-	outFile, err := os.Create(gpgFile)
-	defer outFile.Close()
-	if err != nil {
-		return fmt.Errorf("encryptBackup: error opening out file: %w", err)
-	}
-
-	_, name := path.Split(s.file)
-	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
-		IsBinary: true,
-		FileName: name,
-	}, nil)
-	defer dst.Close()
-	if err != nil {
-		return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
-	}
-
-	src, err := os.Open(s.file)
-	if err != nil {
-		return fmt.Errorf("encryptBackup: error opening backup file %s: %w", s.file, err)
-	}
-
-	if _, err := io.Copy(dst, src); err != nil {
-		return fmt.Errorf("encryptBackup: error writing ciphertext to file: %w", err)
-	}
-
-	s.file = gpgFile
-	s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
-	return nil
-}
-
-// copyBackup makes sure the backup file is copied to both local and remote locations
-// as per the given configuration.
-func (s *script) copyBackup() error {
-	_, name := path.Split(s.file)
-	if s.c.AwsS3BucketName != "" {
-		_, err := s.mc.FPutObject(s.ctx, s.c.AwsS3BucketName, name, s.file, minio.PutObjectOptions{
-			ContentType: "application/tar+gzip",
-		})
-		if err != nil {
-			return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
-		}
-		s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`", s.file, s.c.AwsS3BucketName)
-	}
-
-	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
-		if err := copy(s.file, path.Join(s.c.BackupArchive, name)); err != nil {
-			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
-		}
-		s.logger.Infof("Stored copy of backup `%s` in local archive `%s`", s.file, s.c.BackupArchive)
-	}
-	return nil
-}
-
-// removeArtifacts removes the backup file from disk.
-func (s *script) removeArtifacts() error {
-	if err := os.Remove(s.file); err != nil {
-		return fmt.Errorf("removeArtifacts: error removing file: %w", err)
-	}
-	s.logger.Info("Removed local artifacts.")
-	return nil
-}
-
-// pruneOldBackups rotates away backups from local and remote storages using
-// the given configuration. In case the given configuration would delete all
-// backups, it does nothing instead.
-func (s *script) pruneOldBackups() error {
-	if s.c.BackupRetentionDays < 0 {
-		return nil
-	}
-
-	if s.c.BackupPruningLeeway != 0 {
-		s.logger.Infof("Sleeping for %s before pruning backups.", s.c.BackupPruningLeeway)
-		time.Sleep(s.c.BackupPruningLeeway)
-	}
-
-	s.logger.Infof("Trying to prune backups older than %d day(s) now.", s.c.BackupRetentionDays)
-	deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays))
-
-	if s.c.AwsS3BucketName != "" {
-		candidates := s.mc.ListObjects(s.ctx, s.c.AwsS3BucketName, minio.ListObjectsOptions{
-			WithMetadata: true,
-			Prefix:       s.c.BackupPruningPrefix,
-		})
-
-		var matches []minio.ObjectInfo
-		var lenCandidates int
-		for candidate := range candidates {
-			lenCandidates++
-			if candidate.Err != nil {
-				return fmt.Errorf(
-					"pruneOldBackups: error looking up candidates from remote storage: %w",
-					candidate.Err,
-				)
-			}
-			if candidate.LastModified.Before(deadline) {
-				matches = append(matches, candidate)
-			}
-		}
-
-		if len(matches) != 0 && len(matches) != lenCandidates {
-			objectsCh := make(chan minio.ObjectInfo)
-			go func() {
-				for _, match := range matches {
-					objectsCh <- match
-				}
-				close(objectsCh)
-			}()
-			errChan := s.mc.RemoveObjects(s.ctx, s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{})
-			var errors []error
-			for result := range errChan {
-				if result.Err != nil {
-					errors = append(errors, result.Err)
-				}
-			}
-
-			if len(errors) != 0 {
-				return fmt.Errorf(
-					"pruneOldBackups: %d error(s) removing files from remote storage: %w",
-					len(errors),
-					errors[0],
-				)
-			}
-			s.logger.Infof(
-				"Pruned %d out of %d remote backup(s) as their age exceeded the configured retention period of %d days.",
-				len(matches),
-				lenCandidates,
-				s.c.BackupRetentionDays,
-			)
-		} else if len(matches) != 0 && len(matches) == lenCandidates {
-			s.logger.Warnf(
-				"The current configuration would delete all %d remote backup copies.",
-				len(matches),
-			)
-			s.logger.Warn("Refusing to do so, please check your configuration.")
-		} else {
-			s.logger.Infof("None of %d remote backup(s) were pruned.", lenCandidates)
-		}
-	}
-
-	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
-		candidates, err := filepath.Glob(
-			path.Join(s.c.BackupArchive, fmt.Sprintf("%s*", s.c.BackupPruningPrefix)),
-		)
-		if err != nil {
-			return fmt.Errorf(
-				"pruneOldBackups: error looking up matching files, starting with: %w", err,
-			)
-		}
-
-		var matches []string
-		for _, candidate := range candidates {
-			fi, err := os.Stat(candidate)
-			if err != nil {
-				return fmt.Errorf(
-					"pruneOldBackups: error calling stat on file %s: %w",
-					candidate,
-					err,
-				)
-			}
-
-			if fi.ModTime().Before(deadline) {
-				matches = append(matches, candidate)
-			}
-		}
-
-		if len(matches) != 0 && len(matches) != len(candidates) {
-			var errors []error
-			for _, candidate := range matches {
-				if err := os.Remove(candidate); err != nil {
-					errors = append(errors, err)
-				}
-			}
-			if len(errors) != 0 {
-				return fmt.Errorf(
-					"pruneOldBackups: %d error(s) deleting local files, starting with: %w",
-					len(errors),
-					errors[0],
-				)
-			}
-			s.logger.Infof(
-				"Pruned %d out of %d local backup(s) as their age exceeded the configured retention period of %d days.",
-				len(matches),
-				len(candidates),
-				s.c.BackupRetentionDays,
-			)
-		} else if len(matches) != 0 && len(matches) == len(candidates) {
-			s.logger.Warnf(
-				"The current configuration would delete all %d local backup copies.",
-				len(matches),
-			)
-			s.logger.Warn("Refusing to do so, please check your configuration.")
-		} else {
-			s.logger.Infof("None of %d local backup(s) were pruned.", len(candidates))
-		}
-	}
-	return nil
-}
-
-func (s *script) must(err error) {
-	if err != nil {
-		s.logger.Fatalf("Fatal error running backup: %s", err)
-	}
-}
-
-// lock opens a lockfile at the given location, keeping it locked until the
-// caller invokes the returned release func. When invoked while the file is
-// still locked the function panics.
-func lock(lockfile string) func() error {
-	fileLock := flock.New(lockfile)
-	acquired, err := fileLock.TryLock()
-	if err != nil {
-		panic(err)
-	}
-	if !acquired {
-		panic("unable to acquire file lock")
-	}
-	return fileLock.Unlock
-}
-
-// copy creates a copy of the file located at `dst` at `src`.
-func copy(src, dst string) error {
-	in, err := os.Open(src)
-	if err != nil {
-		return err
-	}
-	defer in.Close()
-
-	out, err := os.Create(dst)
-	if err != nil {
-		return err
-	}
-
-	_, err = io.Copy(out, in)
-	if err != nil {
-		out.Close()
-		return err
-	}
-	return out.Close()
 }
```
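The deferred `recover` in the new main is the backbone of the reworked error handling: individual steps fail by panicking with an error, and the single deferred handler turns that into hook execution plus a non-zero exit code. A standalone sketch of just that pattern (not the project's actual code):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	defer func() {
		if pArg := recover(); pArg != nil {
			if err, ok := pArg.(error); ok {
				fmt.Println("notify failure:", err) // stand-in for s.runHooks(err)
				os.Exit(1)
			}
			panic(pArg) // non-error panic values are re-raised unchanged
		}
		fmt.Println("notify success") // stand-in for s.runHooks(nil)
	}()

	// Stand-in for a failing backup step surfaced via panic.
	panic(errors.New("backup failed"))
}
```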
cmd/backup/notifications.go (new file, +107 lines)

```go
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"bytes"
	_ "embed"
	"fmt"
	"os"
	"text/template"
	"time"

	sTypes "github.com/containrrr/shoutrrr/pkg/types"
)

//go:embed notifications.tmpl
var defaultNotifications string

// NotificationData is the data passed to the notification templates
type NotificationData struct {
	Error  error
	Config *Config
	Stats  *Stats
}

// notify sends a notification using the given title and body templates.
// It automatically creates the notification data, adding the given error.
func (s *script) notify(titleTemplate string, bodyTemplate string, err error) error {
	params := NotificationData{
		Error:  err,
		Stats:  s.stats,
		Config: s.c,
	}

	titleBuf := &bytes.Buffer{}
	if err := s.template.ExecuteTemplate(titleBuf, titleTemplate, params); err != nil {
		return fmt.Errorf("notifyFailure: error executing %s template: %w", titleTemplate, err)
	}

	bodyBuf := &bytes.Buffer{}
	if err := s.template.ExecuteTemplate(bodyBuf, bodyTemplate, params); err != nil {
		return fmt.Errorf("notifyFailure: error executing %s template: %w", bodyTemplate, err)
	}

	if err := s.sendNotification(titleBuf.String(), bodyBuf.String()); err != nil {
		return fmt.Errorf("notifyFailure: error notifying: %w", err)
	}
	return nil
}

// notifyFailure sends a notification about a failed backup run
func (s *script) notifyFailure(err error) error {
	return s.notify("title_failure", "body_failure", err)
}

// notifySuccess sends a notification about a successful backup run
func (s *script) notifySuccess() error {
	return s.notify("title_success", "body_success", nil)
}

// sendNotification sends a notification to all configured third party services
func (s *script) sendNotification(title, body string) error {
	var errs []error
	for _, result := range s.sender.Send(body, &sTypes.Params{"title": title}) {
		if result != nil {
			errs = append(errs, result)
		}
	}
	if len(errs) != 0 {
		return fmt.Errorf("sendNotification: error sending message: %w", join(errs...))
	}
	return nil
}

var templateHelpers = template.FuncMap{
	"formatTime": func(t time.Time) string {
		return t.Format(time.RFC3339)
	},
	"formatBytesDec": func(bytes uint64) string {
		return formatBytes(bytes, true)
	},
	"formatBytesBin": func(bytes uint64) string {
		return formatBytes(bytes, false)
	},
	"env": os.Getenv,
}

// formatBytes converts an amount of bytes into a human-readable representation;
// the decimal parameter specifies whether to use powers of 1000 (decimal) or powers of 1024 (binary)
func formatBytes(b uint64, decimal bool) string {
	unit := uint64(1024)
	format := "%.1f %ciB"
	if decimal {
		unit = uint64(1000)
		format = "%.1f %cB"
	}
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := unit, 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf(format, float64(b)/float64(div), "kMGTPE"[exp])
}
```
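A few concrete values, as computed by `formatBytes` above:

```go
formatBytes(999, true)        // "999 B"
formatBytes(1500, true)       // "1.5 kB"  (decimal, powers of 1000)
formatBytes(1536, false)      // "1.5 kiB" (binary, powers of 1024)
formatBytes(5000000000, true) // "5.0 GB"
```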
cmd/backup/notifications.tmpl (new file, +26 lines)

```
{{ define "title_failure" -}}
Failure running docker-volume-backup at {{ .Stats.StartTime | formatTime }}
{{- end }}

{{ define "body_failure" -}}
Running docker-volume-backup failed with error: {{ .Error }}

Log output of the failed run was:

{{ .Stats.LogOutput }}
{{- end }}

{{ define "title_success" -}}
Success running docker-volume-backup at {{ .Stats.StartTime | formatTime }}
{{- end }}

{{ define "body_success" -}}
Running docker-volume-backup succeeded.

Log output was:

{{ .Stats.LogOutput }}
{{- end }}
```
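These templates are rendered by `notify` and the result is dispatched through shoutrrr. A hedged, standalone sketch of wiring up a sender from a notification URL; the SMTP URL is an invented example of what `NOTIFICATION_URLS` might contain:

```go
package main

import (
	"github.com/containrrr/shoutrrr"
	sTypes "github.com/containrrr/shoutrrr/pkg/types"
)

func main() {
	// Hypothetical notification target; real values come from NOTIFICATION_URLS.
	sender, err := shoutrrr.CreateSender("smtp://user:password@mail.example.com:587/?fromAddress=noreply@nohost&toAddresses=admin@example.com")
	if err != nil {
		panic(err)
	}
	// The title is passed as a param, matching sendNotification above.
	for _, result := range sender.Send("backup finished", &sTypes.Params{"title": "docker-volume-backup"}) {
		if result != nil {
			panic(result)
		}
	}
}
```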
843
cmd/backup/script.go
Normal file
843
cmd/backup/script.go
Normal file
@@ -0,0 +1,843 @@
|
|||||||
|
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"text/template"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/containrrr/shoutrrr"
|
||||||
|
"github.com/containrrr/shoutrrr/pkg/router"
|
||||||
|
"github.com/docker/docker/api/types"
|
||||||
|
"github.com/docker/docker/api/types/filters"
|
||||||
|
"github.com/docker/docker/api/types/swarm"
|
||||||
|
"github.com/docker/docker/client"
|
||||||
|
"github.com/kelseyhightower/envconfig"
|
||||||
|
"github.com/leekchan/timeutil"
|
||||||
|
"github.com/minio/minio-go/v7"
|
||||||
|
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||||
|
"github.com/otiai10/copy"
|
||||||
|
"github.com/pkg/sftp"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"github.com/studio-b12/gowebdav"
|
||||||
|
"golang.org/x/crypto/openpgp"
|
||||||
|
"golang.org/x/crypto/ssh"
|
||||||
|
)
|
||||||
|
|
||||||
|
// script holds all the stateful information required to orchestrate a
|
||||||
|
// single backup run.
|
||||||
|
type script struct {
|
||||||
|
cli *client.Client
|
||||||
|
minioClient *minio.Client
|
||||||
|
webdavClient *gowebdav.Client
|
||||||
|
sshClient *ssh.Client
|
||||||
|
sftpClient *sftp.Client
|
||||||
|
logger *logrus.Logger
|
||||||
|
sender *router.ServiceRouter
|
||||||
|
template *template.Template
|
||||||
|
hooks []hook
|
||||||
|
hookLevel hookLevel
|
||||||
|
|
||||||
|
file string
|
||||||
|
stats *Stats
|
||||||
|
|
||||||
|
encounteredLock bool
|
||||||
|
|
||||||
|
c *Config
|
||||||
|
}
|
||||||
|
|
||||||
|
// newScript creates all resources needed for the script to perform actions against
// remote resources like the Docker engine or remote storage locations. All
// reading from env vars or other configuration sources is expected to happen
// in this method.
func newScript() (*script, error) {
	stdOut, logBuffer := buffer(os.Stdout)
	s := &script{
		c: &Config{},
		logger: &logrus.Logger{
			Out:       stdOut,
			Formatter: new(logrus.TextFormatter),
			Hooks:     make(logrus.LevelHooks),
			Level:     logrus.InfoLevel,
		},
		stats: &Stats{
			StartTime: time.Now(),
			LogOutput: logBuffer,
			Storages:  StoragesStats{},
		},
	}

	s.registerHook(hookLevelPlumbing, func(error) error {
		s.stats.EndTime = time.Now()
		s.stats.TookTime = s.stats.EndTime.Sub(s.stats.StartTime)
		return nil
	})

	if err := envconfig.Process("", s.c); err != nil {
		return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
	}

	s.file = path.Join("/tmp", s.c.BackupFilename)
	if s.c.BackupFilenameExpand {
		s.file = os.ExpandEnv(s.file)
		s.c.BackupLatestSymlink = os.ExpandEnv(s.c.BackupLatestSymlink)
		s.c.BackupPruningPrefix = os.ExpandEnv(s.c.BackupPruningPrefix)
	}
	s.file = timeutil.Strftime(&s.stats.StartTime, s.file)

	_, err := os.Stat("/var/run/docker.sock")
	_, dockerHostSet := os.LookupEnv("DOCKER_HOST")
	if !os.IsNotExist(err) || dockerHostSet {
		cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
		if err != nil {
			return nil, fmt.Errorf("newScript: failed to create docker client: %w", err)
		}
		s.cli = cli
	}

	if s.c.AwsS3BucketName != "" {
		var creds *credentials.Credentials
		if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" {
			creds = credentials.NewStaticV4(
				s.c.AwsAccessKeyID,
				s.c.AwsSecretAccessKey,
				"",
			)
		} else if s.c.AwsIamRoleEndpoint != "" {
			creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint)
		} else {
			return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
		}

		options := minio.Options{
			Creds:  creds,
			Secure: s.c.AwsEndpointProto == "https",
		}

		if s.c.AwsEndpointInsecure {
			if !options.Secure {
				return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
			}

			transport, err := minio.DefaultTransport(true)
			if err != nil {
				return nil, fmt.Errorf("newScript: failed to create default minio transport: %w", err)
			}
			transport.TLSClientConfig.InsecureSkipVerify = true
			options.Transport = transport
		}

		mc, err := minio.New(s.c.AwsEndpoint, &options)
		if err != nil {
			return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
		}
		s.minioClient = mc
	}

	if s.c.WebdavUrl != "" {
		if s.c.WebdavUsername == "" || s.c.WebdavPassword == "" {
			return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
		} else {
			webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword)
			s.webdavClient = webdavClient
			if s.c.WebdavUrlInsecure {
				defaultTransport, ok := http.DefaultTransport.(*http.Transport)
				if !ok {
					return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport")
				}
				webdavTransport := defaultTransport.Clone()
				webdavTransport.TLSClientConfig.InsecureSkipVerify = s.c.WebdavUrlInsecure
				s.webdavClient.SetTransport(webdavTransport)
			}
		}
	}

	if s.c.SSHHostName != "" {
		var authMethods []ssh.AuthMethod

		if s.c.SSHPassword != "" {
			authMethods = append(authMethods, ssh.Password(s.c.SSHPassword))
		}

		if _, err := os.Stat(s.c.SSHIdentityFile); err == nil {
			key, err := ioutil.ReadFile(s.c.SSHIdentityFile)
			if err != nil {
				return nil, errors.New("newScript: error reading the private key")
			}

			var signer ssh.Signer
			if s.c.SSHIdentityPassphrase != "" {
				signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(s.c.SSHIdentityPassphrase))
				if err != nil {
					return nil, errors.New("newScript: error parsing the encrypted private key")
				}
				authMethods = append(authMethods, ssh.PublicKeys(signer))
			} else {
				signer, err = ssh.ParsePrivateKey(key)
				if err != nil {
					return nil, errors.New("newScript: error parsing the private key")
				}
				authMethods = append(authMethods, ssh.PublicKeys(signer))
			}
		}

		sshClientConfig := &ssh.ClientConfig{
			User:            s.c.SSHUser,
			Auth:            authMethods,
			HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		}
		sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", s.c.SSHHostName, s.c.SSHPort), sshClientConfig)
		s.sshClient = sshClient
		if err != nil {
			return nil, fmt.Errorf("newScript: error creating ssh client: %w", err)
		}
		_, _, err = s.sshClient.SendRequest("keepalive", false, nil)
		if err != nil {
			return nil, err
		}

		sftpClient, err := sftp.NewClient(sshClient)
		s.sftpClient = sftpClient
		if err != nil {
			return nil, fmt.Errorf("newScript: error creating sftp client: %w", err)
		}
	}

	if s.c.EmailNotificationRecipient != "" {
		emailURL := fmt.Sprintf(
			"smtp://%s:%s@%s:%d/?from=%s&to=%s",
			s.c.EmailSMTPUsername,
			s.c.EmailSMTPPassword,
			s.c.EmailSMTPHost,
			s.c.EmailSMTPPort,
			s.c.EmailNotificationSender,
			s.c.EmailNotificationRecipient,
		)
		s.c.NotificationURLs = append(s.c.NotificationURLs, emailURL)
		s.logger.Warn(
			"Using EMAIL_* keys for providing notification configuration has been deprecated and will be removed in the next major version.",
		)
		s.logger.Warn(
			"Please use NOTIFICATION_URLS instead. Refer to the README for an upgrade guide.",
		)
	}

	hookLevel, ok := hookLevels[s.c.NotificationLevel]
	if !ok {
		return nil, fmt.Errorf("newScript: unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel)
	}
	s.hookLevel = hookLevel

	if len(s.c.NotificationURLs) > 0 {
		sender, senderErr := shoutrrr.CreateSender(s.c.NotificationURLs...)
		if senderErr != nil {
			return nil, fmt.Errorf("newScript: error creating sender: %w", senderErr)
		}
		s.sender = sender

		tmpl := template.New("")
		tmpl.Funcs(templateHelpers)
		tmpl, err = tmpl.Parse(defaultNotifications)
		if err != nil {
			return nil, fmt.Errorf("newScript: unable to parse default notifications templates: %w", err)
		}

		if fi, err := os.Stat("/etc/dockervolumebackup/notifications.d"); err == nil && fi.IsDir() {
			tmpl, err = tmpl.ParseGlob("/etc/dockervolumebackup/notifications.d/*.*")
			if err != nil {
				return nil, fmt.Errorf("newScript: unable to parse user defined notifications templates: %w", err)
			}
		}
		s.template = tmpl

		// To prevent duplicate notifications, ensure the registered callbacks
		// run mutually exclusively.
		s.registerHook(hookLevelError, func(err error) error {
			if err == nil {
				return nil
			}
			return s.notifyFailure(err)
		})
		s.registerHook(hookLevelInfo, func(err error) error {
			if err != nil {
				return nil
			}
			return s.notifySuccess()
		})
	}

	return s, nil
}

// stopContainers stops all Docker containers that are marked to be stopped
// during the backup and returns a function that can be called to
// restart everything that has been stopped.
func (s *script) stopContainers() (func() error, error) {
	if s.cli == nil {
		return noop, nil
	}

	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
		Quiet: true,
	})
	if err != nil {
		return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
	}

	containerLabel := fmt.Sprintf(
		"docker-volume-backup.stop-during-backup=%s",
		s.c.BackupStopContainerLabel,
	)
	containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
		Quiet: true,
		Filters: filters.NewArgs(filters.KeyValuePair{
			Key:   "label",
			Value: containerLabel,
		}),
	})

	if err != nil {
		return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
	}

	if len(containersToStop) == 0 {
		return noop, nil
	}

	s.logger.Infof(
		"Stopping %d container(s) labeled `%s` out of %d running container(s).",
		len(containersToStop),
		containerLabel,
		len(allContainers),
	)

	var stoppedContainers []types.Container
	var stopErrors []error
	for _, container := range containersToStop {
		if err := s.cli.ContainerStop(context.Background(), container.ID, nil); err != nil {
			stopErrors = append(stopErrors, err)
		} else {
			stoppedContainers = append(stoppedContainers, container)
		}
	}

	var stopError error
	if len(stopErrors) != 0 {
		stopError = fmt.Errorf(
			"stopContainersAndRun: %d error(s) stopping containers: %w",
			len(stopErrors),
			join(stopErrors...),
		)
	}

	s.stats.Containers = ContainersStats{
		All:     uint(len(allContainers)),
		ToStop:  uint(len(containersToStop)),
		Stopped: uint(len(stoppedContainers)),
	}

	return func() error {
		servicesRequiringUpdate := map[string]struct{}{}

		var restartErrors []error
		for _, container := range stoppedContainers {
			if swarmServiceName, ok := container.Labels["com.docker.swarm.service.name"]; ok {
				servicesRequiringUpdate[swarmServiceName] = struct{}{}
				continue
			}
			if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
				restartErrors = append(restartErrors, err)
			}
		}

		if len(servicesRequiringUpdate) != 0 {
			services, _ := s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
			for serviceName := range servicesRequiringUpdate {
				var serviceMatch swarm.Service
				for _, service := range services {
					if service.Spec.Name == serviceName {
						serviceMatch = service
						break
					}
				}
				if serviceMatch.ID == "" {
					return fmt.Errorf("stopContainersAndRun: couldn't find service with name %s", serviceName)
				}
				serviceMatch.Spec.TaskTemplate.ForceUpdate = 1
				if _, err := s.cli.ServiceUpdate(
					context.Background(), serviceMatch.ID,
					serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
				); err != nil {
					restartErrors = append(restartErrors, err)
				}
			}
		}

		if len(restartErrors) != 0 {
			return fmt.Errorf(
				"stopContainersAndRun: %d error(s) restarting containers and services: %w",
				len(restartErrors),
				join(restartErrors...),
			)
		}
		s.logger.Infof(
			"Restarted %d container(s) and the matching service(s).",
			len(stoppedContainers),
		)
		return nil
	}, stopError
}

// createArchive creates a tar archive of the configured backup location and
// saves it to disk.
func (s *script) createArchive() error {
	backupSources := s.c.BackupSources

	if s.c.BackupFromSnapshot {
		s.logger.Warn(
			"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
		)
		s.logger.Warn(
			"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the README for an upgrade guide.",
		)
		backupSources = filepath.Join("/tmp", s.c.BackupSources)
		// Copy before compressing to guard against a situation where the
		// backup folder's contents are still growing.
		s.registerHook(hookLevelPlumbing, func(error) error {
			if err := remove(backupSources); err != nil {
				return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
			}
			s.logger.Infof("Removed snapshot `%s`.", backupSources)
			return nil
		})
		if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
			PreserveTimes: true,
			PreserveOwner: true,
		}); err != nil {
			return fmt.Errorf("takeBackup: error creating snapshot: %w", err)
		}
		s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
	}

	tarFile := s.file
	s.registerHook(hookLevelPlumbing, func(error) error {
		if err := remove(tarFile); err != nil {
			return fmt.Errorf("takeBackup: error removing tar file: %w", err)
		}
		s.logger.Infof("Removed tar file `%s`.", tarFile)
		return nil
	})

	backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
	if err != nil {
		return fmt.Errorf("takeBackup: error getting absolute path: %w", err)
	}

	var filesEligibleForBackup []string
	if err := filepath.WalkDir(backupPath, func(path string, di fs.DirEntry, err error) error {
		if err != nil {
			return err
		}

		if s.c.BackupExcludeRegexp.Re != nil && s.c.BackupExcludeRegexp.Re.MatchString(path) {
			return nil
		}
		filesEligibleForBackup = append(filesEligibleForBackup, path)
		return nil
	}); err != nil {
		return fmt.Errorf("compress: error walking filesystem tree: %w", err)
	}

	if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
		return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
	}

	s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
	return nil
}

// encryptArchive encrypts the backup file using PGP and the configured passphrase.
// In case no passphrase is given it returns early, leaving the backup file
// untouched.
func (s *script) encryptArchive() error {
	if s.c.GpgPassphrase == "" {
		return nil
	}

	gpgFile := fmt.Sprintf("%s.gpg", s.file)
	s.registerHook(hookLevelPlumbing, func(error) error {
		if err := remove(gpgFile); err != nil {
			return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
		}
		s.logger.Infof("Removed GPG file `%s`.", gpgFile)
		return nil
	})

	outFile, err := os.Create(gpgFile)
	if err != nil {
		return fmt.Errorf("encryptBackup: error opening out file: %w", err)
	}
	defer outFile.Close()

	_, name := path.Split(s.file)
	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
		IsBinary: true,
		FileName: name,
	}, nil)
	if err != nil {
		return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
	}
	defer dst.Close()

	src, err := os.Open(s.file)
	if err != nil {
		return fmt.Errorf("encryptBackup: error opening backup file `%s`: %w", s.file, err)
	}

	if _, err := io.Copy(dst, src); err != nil {
		return fmt.Errorf("encryptBackup: error writing ciphertext to file: %w", err)
	}

	s.file = gpgFile
	s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
	return nil
}

// copyArchive makes sure the backup file is copied to both local and remote locations
// as per the given configuration.
func (s *script) copyArchive() error {
	_, name := path.Split(s.file)
	if stat, err := os.Stat(s.file); err != nil {
		return fmt.Errorf("copyBackup: unable to stat backup file: %w", err)
	} else {
		size := stat.Size()
		s.stats.BackupFile = BackupFileStats{
			Size:     uint64(size),
			Name:     name,
			FullPath: s.file,
		}
	}

	if s.minioClient != nil {
		if _, err := s.minioClient.FPutObject(context.Background(), s.c.AwsS3BucketName, filepath.Join(s.c.AwsS3Path, name), s.file, minio.PutObjectOptions{
			ContentType:  "application/tar+gzip",
			StorageClass: s.c.AwsStorageClass,
		}); err != nil {
			return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
		}
		s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName)
	}

	if s.webdavClient != nil {
		bytes, err := os.ReadFile(s.file)
		if err != nil {
			return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
		}
		if err := s.webdavClient.MkdirAll(s.c.WebdavPath, 0644); err != nil {
			return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", s.c.WebdavPath, err)
		}
		if err := s.webdavClient.Write(filepath.Join(s.c.WebdavPath, name), bytes, 0644); err != nil {
			return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err)
		}
		s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath)
	}

	if s.sshClient != nil {
		source, err := os.Open(s.file)
		if err != nil {
			return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
		}
		defer source.Close()

		destination, err := s.sftpClient.Create(filepath.Join(s.c.SSHRemotePath, name))
		if err != nil {
			return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err)
		}
		defer destination.Close()

		chunk := make([]byte, 1000000)
		for {
			num, err := source.Read(chunk)
			if err == io.EOF {
				tot, err := destination.Write(chunk[:num])
				if err != nil {
					return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
				}

				if tot != len(chunk[:num]) {
					return fmt.Errorf("sshClient: failed to write stream")
				}

				break
			}

			if err != nil {
				return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
			}

			tot, err := destination.Write(chunk[:num])
			if err != nil {
				return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err)
			}

			if tot != len(chunk[:num]) {
				return fmt.Errorf("sshClient: failed to write stream")
			}
		}

		s.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", s.file, s.c.SSHHostName, s.c.SSHRemotePath)
	}

	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
		if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil {
			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
		}
		s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive)
		if s.c.BackupLatestSymlink != "" {
			symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink)
			if _, err := os.Lstat(symlink); err == nil {
				os.Remove(symlink)
			}
			if err := os.Symlink(name, symlink); err != nil {
				return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
			}
			s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink)
		}
	}
	return nil
}

// pruneBackups rotates away backups from local and remote storages using
// the given configuration. In case the given configuration would delete all
// backups, it does nothing instead and logs a warning.
func (s *script) pruneBackups() error {
	if s.c.BackupRetentionDays < 0 {
		return nil
	}

	deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)

	// doPrune holds general control flow that applies to any kind of storage.
	// Callers can pass in a thunk that performs the actual deletion of files.
	var doPrune = func(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
		if lenMatches != 0 && lenMatches != lenCandidates {
			if err := doRemoveFiles(); err != nil {
				return err
			}
			s.logger.Infof(
				"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
				lenMatches,
				lenCandidates,
				description,
				s.c.BackupRetentionDays,
			)
		} else if lenMatches != 0 && lenMatches == lenCandidates {
			s.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description)
			s.logger.Warn("Refusing to do so, please check your configuration.")
		} else {
			s.logger.Infof("None of %d existing %s were pruned.", lenCandidates, description)
		}
		return nil
	}

	if s.minioClient != nil {
		candidates := s.minioClient.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{
			WithMetadata: true,
			Prefix:       filepath.Join(s.c.AwsS3Path, s.c.BackupPruningPrefix),
			Recursive:    true,
		})

		var matches []minio.ObjectInfo
		var lenCandidates int
		for candidate := range candidates {
			lenCandidates++
			if candidate.Err != nil {
				return fmt.Errorf(
					"pruneBackups: error looking up candidates from remote storage: %w",
					candidate.Err,
				)
			}
			if candidate.LastModified.Before(deadline) {
				matches = append(matches, candidate)
			}
		}

		s.stats.Storages.S3 = StorageStats{
			Total:  uint(lenCandidates),
			Pruned: uint(len(matches)),
		}

		doPrune(len(matches), lenCandidates, "remote backup(s)", func() error {
			objectsCh := make(chan minio.ObjectInfo)
			go func() {
				for _, match := range matches {
					objectsCh <- match
				}
				close(objectsCh)
			}()
			errChan := s.minioClient.RemoveObjects(context.Background(), s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{})
			var removeErrors []error
			for result := range errChan {
				if result.Err != nil {
					removeErrors = append(removeErrors, result.Err)
				}
			}
			if len(removeErrors) != 0 {
				return join(removeErrors...)
			}
			return nil
		})
	}

	if s.webdavClient != nil {
		candidates, err := s.webdavClient.ReadDir(s.c.WebdavPath)
		if err != nil {
			return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err)
		}
		var matches []fs.FileInfo
		var lenCandidates int
		for _, candidate := range candidates {
			if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) {
				continue
			}
			lenCandidates++
			if candidate.ModTime().Before(deadline) {
				matches = append(matches, candidate)
			}
		}

		s.stats.Storages.WebDAV = StorageStats{
			Total:  uint(lenCandidates),
			Pruned: uint(len(matches)),
		}

		doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error {
			for _, match := range matches {
				if err := s.webdavClient.Remove(filepath.Join(s.c.WebdavPath, match.Name())); err != nil {
					return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err)
				}
			}
			return nil
		})
	}

	if s.sshClient != nil {
		candidates, err := s.sftpClient.ReadDir(s.c.SSHRemotePath)
		if err != nil {
			return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err)
		}

		var matches []string
		for _, candidate := range candidates {
			if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) {
				continue
			}
			if candidate.ModTime().Before(deadline) {
				matches = append(matches, candidate.Name())
			}
		}

		s.stats.Storages.SSH = StorageStats{
			Total:  uint(len(candidates)),
			Pruned: uint(len(matches)),
		}

		doPrune(len(matches), len(candidates), "SSH backup(s)", func() error {
			for _, match := range matches {
				if err := s.sftpClient.Remove(filepath.Join(s.c.SSHRemotePath, match)); err != nil {
					return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err)
				}
			}
			return nil
		})
	}

	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
		globPattern := path.Join(
			s.c.BackupArchive,
			fmt.Sprintf("%s*", s.c.BackupPruningPrefix),
		)
		globMatches, err := filepath.Glob(globPattern)
		if err != nil {
			return fmt.Errorf(
				"pruneBackups: error looking up matching files using pattern %s: %w",
				globPattern,
				err,
			)
		}

		var candidates []string
		for _, candidate := range globMatches {
			fi, err := os.Lstat(candidate)
			if err != nil {
				return fmt.Errorf(
					"pruneBackups: error calling Lstat on file %s: %w",
					candidate,
					err,
				)
			}

			if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
				candidates = append(candidates, candidate)
			}
		}

		var matches []string
		for _, candidate := range candidates {
			fi, err := os.Stat(candidate)
			if err != nil {
				return fmt.Errorf(
					"pruneBackups: error calling stat on file %s: %w",
					candidate,
					err,
				)
			}
			if fi.ModTime().Before(deadline) {
				matches = append(matches, candidate)
			}
		}

		s.stats.Storages.Local = StorageStats{
			Total:  uint(len(candidates)),
			Pruned: uint(len(matches)),
		}

		doPrune(len(matches), len(candidates), "local backup(s)", func() error {
			var removeErrors []error
			for _, match := range matches {
				if err := os.Remove(match); err != nil {
					removeErrors = append(removeErrors, err)
				}
			}
			if len(removeErrors) != 0 {
				return fmt.Errorf(
					"pruneBackups: %d error(s) deleting local files, starting with: %w",
					len(removeErrors),
					join(removeErrors...),
				)
			}
			return nil
		})
	}
	return nil
}

// must exits the script run prematurely in case the given error
// is non-nil.
func (s *script) must(err error) {
	if err != nil {
		s.logger.Errorf("Fatal error running backup: %s", err)
		panic(err)
	}
}
cmd/backup/stats.go (new file, 51 lines)
@@ -0,0 +1,51 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"bytes"
	"time"
)

// ContainersStats stats about the docker containers
type ContainersStats struct {
	All        uint
	ToStop     uint
	Stopped    uint
	StopErrors uint
}

// BackupFileStats stats about the created backup file
type BackupFileStats struct {
	Name     string
	FullPath string
	Size     uint64
}

// StorageStats stats about the status of an archival directory
type StorageStats struct {
	Total       uint
	Pruned      uint
	PruneErrors uint
}

// StoragesStats stats about each possible archival location (Local, WebDAV, SSH, S3)
type StoragesStats struct {
	Local  StorageStats
	WebDAV StorageStats
	SSH    StorageStats
	S3     StorageStats
}

// Stats global stats regarding script execution
type Stats struct {
	StartTime  time.Time
	EndTime    time.Time
	TookTime   time.Duration
	LockedTime time.Duration
	LogOutput  *bytes.Buffer
	Containers ContainersStats
	BackupFile BackupFileStats
	Storages   StoragesStats
}
cmd/backup/util.go (new file, 90 lines)
@@ -0,0 +1,90 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
)

var noop = func() error { return nil }

// copyFile creates a copy of the file located at `src` at `dst`.
func copyFile(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}

	_, err = io.Copy(out, in)
	if err != nil {
		out.Close()
		return err
	}
	return out.Close()
}

// join takes a list of errors and joins them into a single error
func join(errs ...error) error {
	if len(errs) == 1 {
		return errs[0]
	}
	var msgs []string
	for _, err := range errs {
		if err == nil {
			continue
		}
		msgs = append(msgs, err.Error())
	}
	return errors.New("[" + strings.Join(msgs, ", ") + "]")
}

// remove removes the given file or directory from disk.
func remove(location string) error {
	fi, err := os.Lstat(location)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
	}
	if fi.IsDir() {
		err = os.RemoveAll(location)
	} else {
		err = os.Remove(location)
	}
	if err != nil {
		return fmt.Errorf("remove: error removing `%s`: %w", location, err)
	}
	return nil
}

// buffer takes an io.Writer and returns a wrapped version of the
// writer that writes to both the original target as well as the returned buffer
func buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
	buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
	return buffering, &buffering.buf
}

type bufferingWriter struct {
	buf    bytes.Buffer
	writer io.Writer
}

func (b *bufferingWriter) Write(p []byte) (n int, err error) {
	if n, err := b.buf.Write(p); err != nil {
		return n, fmt.Errorf("bufferingWriter: error writing to buffer: %w", err)
	}
	return b.writer.Write(p)
}
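As an aside (not part of the diff above): a minimal usage sketch of `buffer`, mirroring how `newScript` tees log output to stdout while keeping a copy in memory for later inclusion in a notification. The `example` function name is illustrative only.

```go
package main

import (
	"fmt"
	"os"

	"github.com/sirupsen/logrus"
)

func example() {
	// Write to stdout and capture a copy in memory at the same time.
	stdOut, logBuffer := buffer(os.Stdout)
	logger := &logrus.Logger{
		Out:       stdOut,
		Formatter: new(logrus.TextFormatter),
		Hooks:     make(logrus.LevelHooks),
		Level:     logrus.InfoLevel,
	}

	logger.Info("Hello from the teed logger.")

	// The buffered copy holds everything that was written so far.
	fmt.Print(logBuffer.String())
}
```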
docs/NOTIFICATION-TEMPLATES.md (new file, 40 lines)
@@ -0,0 +1,40 @@
# Notification templates reference

In order to customize title and body of notifications you'll have to write a [go template](https://pkg.go.dev/text/template) and mount it inside the `/etc/dockervolumebackup/notifications.d/` directory.

Configuration, data about the backup run and helper functions will be passed to this template; this page documents them fully.

## Data

Here is a list of all data passed to the template:

* `Config`: this object holds the configuration that has been passed to the script. The field names are the names of the recognized environment variables converted to PascalCase. (e.g. `BACKUP_STOP_CONTAINER_LABEL` becomes `BackupStopContainerLabel`)
* `Error`: the error that made the backup fail. Only available in the `title_failure` and `body_failure` templates
* `Stats`: object that holds stats regarding script execution. In case of an unsuccessful run, some information may not be available.
  * `StartTime`: time when the script started execution
  * `EndTime`: time when the backup has completed successfully (after pruning)
  * `TookTime`: amount of time it took for the backup to run (equal to `EndTime - StartTime`)
  * `LockedTime`: amount of time it took for the backup to acquire the exclusive lock
  * `LogOutput`: full log of the application
  * `Containers`: object containing stats about the docker containers
    * `All`: total number of containers
    * `ToStop`: number of containers matched by the stop rule
    * `Stopped`: number of containers successfully stopped
    * `StopErrors`: number of containers that could not be stopped (equal to `ToStop - Stopped`)
  * `BackupFile`: object containing information about the backup file
    * `Name`: name of the backup file (e.g. `backup-2022-02-11T01-00-00.tar.gz`)
    * `FullPath`: full path of the backup file (e.g. `/archive/backup-2022-02-11T01-00-00.tar.gz`)
    * `Size`: size in bytes of the backup file
  * `Storages`: object that holds stats about each storage
    * `Local`, `S3`, `WebDAV` or `SSH`:
      * `Total`: total number of backup files
      * `Pruned`: number of backup files that were deleted due to the pruning rule
      * `PruneErrors`: number of backup files that could not be pruned

## Functions

Some formatting and helper functions are also available; an example template using them follows below:

* `formatTime`: formats a time object using [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339) format (e.g. `2022-02-11T01:00:00Z`)
* `formatBytesBin`: formats an amount of bytes using powers of 1024 (e.g. `7055258` bytes will be `6.7 MiB`)
* `formatBytesDec`: formats an amount of bytes using powers of 1000 (e.g. `7055258` bytes will be `7.1 MB`)
* `env`: returns the value of the environment variable of the given key if set
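As a minimal sketch, a custom template mounted at e.g. `/etc/dockervolumebackup/notifications.d/custom.tmpl` (the filename is arbitrary) could look like this, assuming the `title_failure` / `body_failure` template names documented above:

```
{{ define "title_failure" -}}
Backup failed on {{ env "HOSTNAME" }}
{{- end }}
{{ define "body_failure" -}}
Running the backup failed: {{ .Error }}

The run started at {{ formatTime .Stats.StartTime }}. Full log output:
{{ .Stats.LogOutput }}
{{- end }}
```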
@@ -5,10 +5,21 @@
 set -e
 
-BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
-
-echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
-echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
+if [ ! -d "/etc/dockervolumebackup/conf.d" ]; then
+  BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
+  echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
+  echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
+else
+  echo "/etc/dockervolumebackup/conf.d was found, using configuration files from this directory."
+
+  for file in /etc/dockervolumebackup/conf.d/*; do
+    source $file
+    BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
+    echo "Appending cron.d entry with expression $BACKUP_CRON_EXPRESSION and configuration file $file"
+    (crontab -l; echo "$BACKUP_CRON_EXPRESSION /bin/sh -c 'set -a; source $file; set +a && backup' 2>&1") | crontab -
+  done
+fi
 
 echo "Starting cron in foreground."
-crond -f -l 8
+crond -f -d 8
go.mod
@@ -1,45 +1,67 @@
 module github.com/offen/docker-volume-backup
 
-go 1.17
+go 1.18
 
 require (
-	github.com/docker/docker v20.10.8+incompatible
+	github.com/containrrr/shoutrrr v0.5.2
+	github.com/cosiner/argv v0.1.0
+	github.com/docker/docker v20.10.11+incompatible
 	github.com/gofrs/flock v0.8.1
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
-	github.com/minio/minio-go/v7 v7.0.12
+	github.com/minio/minio-go/v7 v7.0.16
+	github.com/otiai10/copy v1.7.0
+	github.com/pkg/sftp v1.13.5
 	github.com/sirupsen/logrus v1.8.1
-	github.com/walle/targz v0.0.0-20140417120357-57fe4206da5a
-	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
+	github.com/studio-b12/gowebdav v0.0.0-20220128162035-c7b1ff8a5e62
+	golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
+	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
 )
 
 require (
-	github.com/Microsoft/go-winio v0.4.17 // indirect
-	github.com/containerd/containerd v1.5.5 // indirect
+	github.com/Microsoft/go-winio v0.5.2 // indirect
+	github.com/containerd/containerd v1.6.6 // indirect
 	github.com/docker/distribution v2.7.1+incompatible // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
+	github.com/fatih/color v1.10.0 // indirect
+	github.com/fsnotify/fsnotify v1.4.9 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.0 // indirect
-	github.com/google/uuid v1.2.0 // indirect
-	github.com/json-iterator/go v1.1.10 // indirect
-	github.com/klauspost/cpuid v1.3.1 // indirect
-	github.com/minio/md5-simd v1.1.0 // indirect
-	github.com/minio/sha256-simd v0.1.1 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/google/uuid v1.3.0 // indirect
+	github.com/gorilla/mux v1.7.3 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.15.6 // indirect
+	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+	github.com/kr/fs v0.1.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
+	github.com/mattn/go-colorable v0.1.8 // indirect
+	github.com/mattn/go-isatty v0.0.12 // indirect
+	github.com/minio/md5-simd v1.1.2 // indirect
+	github.com/minio/sha256-simd v1.0.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
+	github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.1 // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
+	github.com/nxadm/tail v1.4.6 // indirect
+	github.com/onsi/ginkgo v1.14.2 // indirect
+	github.com/onsi/gomega v1.10.3 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.1 // indirect
+	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/rs/xid v1.2.1 // indirect
-	golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect
-	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
-	golang.org/x/text v0.3.4 // indirect
-	google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect
-	google.golang.org/grpc v1.33.2 // indirect
-	google.golang.org/protobuf v1.26.0 // indirect
-	gopkg.in/ini.v1 v1.57.0 // indirect
+	github.com/rs/xid v1.3.0 // indirect
+	golang.org/x/net v0.0.0-20220607020251-c690dde0001d // indirect
+	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
+	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 // indirect
+	google.golang.org/grpc v1.47.0 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
+	gopkg.in/ini.v1 v1.65.0 // indirect
+	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
 )
@@ -3,10 +3,15 @@
 set -e
 
 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))
 
 docker network create test_network
 docker volume create backup_data
 docker volume create app_data
+# This volume is created to test whether empty directories are handled
+# correctly. It is not supposed to hold any data.
+docker volume create empty_data
 
 docker run -d \
   --name minio \
@@ -23,16 +28,15 @@ docker exec minio mkdir -p /data/backup
 docker run -d \
   --name offen \
   --network test_network \
-  --label "docker-volume-backup.stop-during-backup=true" \
   -v app_data:/var/opt/offen/ \
   offen/offen:latest
 
 sleep 10
 
-docker run -d \
-  --name backup \
+docker run --rm \
   --network test_network \
   -v app_data:/backup/app_data \
+  -v empty_data:/backup/empty_data \
   -v /var/run/docker.sock:/var/run/docker.sock \
   --env AWS_ACCESS_KEY_ID=test \
   --env AWS_SECRET_ACCESS_KEY=GMusLtUmILge2by+z890kQ \
@@ -40,25 +44,20 @@ docker run -d \
   --env AWS_ENDPOINT_PROTO=http \
   --env AWS_S3_BUCKET_NAME=backup \
   --env BACKUP_FILENAME=test.tar.gz \
-  --env BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?" \
-  offen/docker-volume-backup:$TEST_VERSION
-docker exec backup backup
+  --env "BACKUP_FROM_SNAPSHOT=true" \
+  --entrypoint backup \
+  offen/docker-volume-backup:${TEST_VERSION:-canary}
 
 docker run --rm -it \
   -v backup_data:/data alpine \
-  ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
+  ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'
 
-echo "[TEST:PASS] Found relevant files in untared backup."
+pass "Found relevant files in untared remote backup."
 
-if [ "$(docker ps -q | wc -l)" != "3" ]; then
-  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
-  docker ps
-  exit 1
-fi
-
-echo "[TEST:PASS] All containers running post backup."
+# This test does not stop containers during backup. This is happening on
+# purpose in order to cover this setup as well.
+expect_running_containers "2"
 
-docker rm $(docker stop minio offen backup)
+docker rm $(docker stop minio offen)
 
 docker volume rm backup_data app_data
 docker network rm test_network
test/commands/docker-compose.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
version: '3.8'

services:
  database:
    image: mariadb:10.7
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      MARIADB_ROOT_PASSWORD: test
      MARIADB_DATABASE: backup
    labels:
      # this is testing the deprecated label on purpose
      - docker-volume-backup.exec-pre=/bin/sh -c 'mysqldump -ptest --all-databases > /tmp/volume/dump.sql'
      - docker-volume-backup.copy-post=/bin/sh -c 'echo "post" > /tmp/volume/post.txt'
      - docker-volume-backup.exec-label=test
    volumes:
      - app_data:/tmp/volume

  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      EXEC_LABEL: test
      EXEC_FORWARD_OUTPUT: "true"
    volumes:
      - archive:/archive
      - app_data:/backup/data:ro
      - /var/run/docker.sock:/var/run/docker.sock

volumes:
  app_data:
  archive:
test/commands/run.sh (new file, 59 lines)
@@ -0,0 +1,59 @@
#!/bin/sh

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

docker-compose up -d
sleep 30 # mariadb likes to take a bit before responding

docker-compose exec backup backup
sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' commands_archive) ./local

tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/dump.sql ]; then
  fail "Could not find file written by pre command."
fi
pass "Found expected file."

if [ -f ./backup/data/post.txt ]; then
  fail "File created in post command was present in backup."
fi
pass "Did not find unexpected file."

docker-compose down --volumes
sudo rm -rf ./local


info "Running commands test in swarm mode next."

docker swarm init

docker stack deploy --compose-file=docker-compose.yml test_stack

while [ -z $(docker ps -q -f name=backup) ]; do
  info "Backup container not ready yet. Retrying."
  sleep 1
done

sleep 20

docker exec $(docker ps -q -f name=backup) backup

sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' test_stack_archive) ./local

tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/dump.sql ]; then
  fail "Could not find file written by pre command."
fi
pass "Found expected file."

if [ -f ./backup/data/post.txt ]; then
  fail "File created in post command was present in backup."
fi
pass "Did not find unexpected file."

docker stack rm test_stack
docker swarm leave --force
@@ -1,55 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
cd $(dirname $0)
|
|
||||||
|
|
||||||
mkdir -p local
|
|
||||||
|
|
||||||
docker-compose up -d
|
|
||||||
sleep 5
|
|
||||||
|
|
||||||
docker-compose exec backup backup
|
|
||||||
|
|
||||||
docker run --rm -it \
|
|
||||||
-v compose_backup_data:/data alpine \
|
|
||||||
ash -c 'apk add gnupg && echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /data/backup/test.tar.gz.gpg > /tmp/test.tar.gz && tar -xf /tmp/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
|
|
||||||
|
|
||||||
echo "[TEST:PASS] Found relevant files in untared remote backup."
|
|
||||||
|
|
||||||
echo 1234secret | gpg -d --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
|
|
||||||
tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
|
|
||||||
rm ./local/decrypted.tar.gz
|
|
||||||
|
|
||||||
echo "[TEST:PASS] Found relevant files in untared local backup."
|
|
||||||
|
|
||||||
if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
|
|
||||||
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
|
|
||||||
docker-compose ps
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "[TEST:PASS] All containers running post backup."
|
|
||||||
|
|
||||||
# The second part of this test checks if backups get deleted when the retention
|
|
||||||
# is set to 0 days (which it should not as it would mean all backups get deleted)
|
|
||||||
# TODO: find out if we can test actual deletion without having to wait for a day
|
|
||||||
BACKUP_RETENTION_DAYS="0" docker-compose up -d
|
|
||||||
sleep 5
|
|
||||||
|
|
||||||
docker-compose exec backup backup
|
|
||||||
|
|
||||||
docker run --rm -it \
|
|
||||||
-v compose_backup_data:/data alpine \
|
|
||||||
ash -c '[ $(find /data/backup/ -type f | wc -l) = "1" ]'
|
|
||||||
|
|
||||||
echo "[TEST:PASS] Remote backups have not been deleted."
|
|
||||||
|
|
||||||
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
|
|
||||||
echo "[TEST:FAIL] Backups should not have been deleted, instead seen:"
|
|
||||||
find ./local -type f
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "[TEST:PASS] Local backups have not been deleted."
|
|
||||||
|
|
||||||
docker-compose down --volumes
|
|
||||||
test/confd/.gitignore (new file)
@@ -0,0 +1 @@
local
test/confd/01backup.env (new file)
@@ -0,0 +1,2 @@
BACKUP_FILENAME="conf.tar.gz"
BACKUP_CRON_EXPRESSION="*/1 * * * *"
test/confd/02backup.env (new file)
@@ -0,0 +1,2 @@
BACKUP_FILENAME="other.tar.gz"
BACKUP_CRON_EXPRESSION="*/1 * * * *"
test/confd/03never.env (new file)
@@ -0,0 +1,2 @@
BACKUP_FILENAME="never.tar.gz"
BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?"
test/confd/docker-compose.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
version: '3'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    volumes:
      - ./local:/archive
      - app_data:/backup/app_data:ro
      - ./01backup.env:/etc/dockervolumebackup/conf.d/01backup.env
      - ./02backup.env:/etc/dockervolumebackup/conf.d/02backup.env
      - ./03never.env:/etc/dockervolumebackup/conf.d/03never.env
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
test/confd/run.sh (new executable file, 31 lines)
@@ -0,0 +1,31 @@
#!/bin/sh

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

mkdir -p local

docker-compose up -d

# sleep until a backup is guaranteed to have happened on the 1 minute schedule
sleep 100

docker-compose down --volumes

if [ ! -f ./local/conf.tar.gz ]; then
  fail "Config from file was not used."
fi
pass "Config from file was used."

if [ ! -f ./local/other.tar.gz ]; then
  fail "Run on same schedule did not succeed."
fi
pass "Run on same schedule succeeded."

if [ -f ./local/never.tar.gz ]; then
  fail "Unexpected file was found."
fi
pass "Unexpected cron did not run."
test/gpg/.gitignore (new file)
@@ -0,0 +1 @@
local
test/gpg/docker-compose.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
version: '3'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_FILENAME: test.tar.gz
      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      GPG_PASSPHRASE: 1234secret
    volumes:
      - ./local:/archive
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
34 test/gpg/run.sh Executable file
@@ -0,0 +1,34 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

mkdir -p local

docker-compose up -d
sleep 5

docker-compose exec backup backup

expect_running_containers "2"

tmp_dir=$(mktemp -d)

echo 1234secret | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C $tmp_dir
ls -lah $tmp_dir
if [ ! -f $tmp_dir/backup/app_data/offen.db ]; then
  fail "Could not find expected file in untared archive."
fi
rm ./local/decrypted.tar.gz

pass "Found relevant files in decrypted and untared local backup."

if [ ! -L ./local/test-latest.tar.gz.gpg ]; then
  fail "Could not find local symlink to latest encrypted backup."
fi

docker-compose down --volumes
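A note on the decryption step above: --pinentry-mode loopback together with --passphrase-fd 0 makes gpg read the passphrase from the piped echo instead of prompting, which keeps the test non-interactive. An equivalent sketch that skips writing the intermediate decrypted.tar.gz (same passphrase assumption):

    echo 1234secret | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 \
      ./local/test.tar.gz.gpg | tar -tz | grep -q 'backup/app_data/offen.db'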
1 test/ignore/.gitignore vendored Normal file
@@ -0,0 +1 @@
local
15 test/ignore/docker-compose.yml Normal file
@@ -0,0 +1,15 @@
version: '3.8'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    deploy:
      restart_policy:
        condition: on-failure
    environment:
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_EXCLUDE_REGEXP: '\.(me|you)$$'
    volumes:
      - ./local:/archive
      - ./sources:/backup/data:ro
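Note that compose treats $$ as an escaped literal $, so the pattern the container actually sees is anchored on the file suffix:

    # effective value inside the container (sketch):
    # BACKUP_EXCLUDE_REGEXP='\.(me|you)$'
    # matches sources/skip.me, leaves sources/me.txt alone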
28 test/ignore/run.sh Normal file
@@ -0,0 +1,28 @@
#!/bin/sh

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

mkdir -p local

docker-compose up -d
sleep 5
docker-compose exec backup backup

docker-compose down --volumes

out=$(mktemp -d)
sudo tar --same-owner -xvf ./local/test.tar.gz -C "$out"

if [ ! -f "$out/backup/data/me.txt" ]; then
  fail "Expected file was not found."
fi
pass "Expected file was found."

if [ -f "$out/backup/data/skip.me" ]; then
  fail "Ignored file was found."
fi
pass "Ignored file was not found."
0 test/ignore/sources/me.txt Normal file
0 test/ignore/sources/skip.me Normal file
1 test/local/.gitignore vendored Normal file
@@ -0,0 +1 @@
local
29 test/local/docker-compose.yml Normal file
@@ -0,0 +1,29 @@
version: '3'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    hostname: hostnametoken
    restart: always
    environment:
      BACKUP_FILENAME_EXPAND: 'true'
      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
      BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz.gpg
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
    volumes:
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock
      - ./local:/archive

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
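With BACKUP_FILENAME_EXPAND set, the container expands environment references in BACKUP_FILENAME and BACKUP_LATEST_SYMLINK at backup time; the $$ is again compose escaping for a literal $. Roughly:

    # inside the container, where hostname is hostnametoken (sketch):
    # test-$HOSTNAME.tar.gz            -> test-hostnametoken.tar.gz
    # test-$HOSTNAME.latest.tar.gz.gpg -> test-hostnametoken.latest.tar.gz.gpg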
55 test/local/run.sh Executable file
@@ -0,0 +1,55 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

mkdir -p local

docker-compose up -d
sleep 5

# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker-compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
docker-compose exec backup backup

sleep 5

expect_running_containers "2"

tmp_dir=$(mktemp -d)
tar -xvf ./local/test-hostnametoken.tar.gz -C $tmp_dir
if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
  fail "Could not find expected file in untared archive."
fi
rm -f ./local/test-hostnametoken.tar.gz

if [ ! -L "$tmp_dir/backup/app_data/db.link" ]; then
  fail "Could not find expected symlink in untared archive."
fi

pass "Found relevant files in decrypted and untared local backup."

if [ ! -L ./local/test-hostnametoken.latest.tar.gz.gpg ]; then
  fail "Could not find symlink to latest version."
fi

pass "Found symlink to latest version in local backup."

# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5

docker-compose exec backup backup

if [ "$(find ./local -type f | wc -l)" != "1" ]; then
  fail "Backups should not have been deleted, instead seen: "$(find ./local -type f)""
fi
pass "Local backups have not been deleted."

docker-compose down --volumes
1 test/notifications/.gitignore vendored Normal file
@@ -0,0 +1 @@
local
37 test/notifications/docker-compose.yml Normal file
@@ -0,0 +1,37 @@
version: '3'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_PRUNING_PREFIX: test
      NOTIFICATION_LEVEL: info
      NOTIFICATION_URLS: ${NOTIFICATION_URLS}
      EXTRA_VALUE: extra-value
    volumes:
      - ./local:/archive
      - app_data:/backup/app_data:ro
      - ./notifications.tmpl:/etc/dockervolumebackup/notifications.d/notifications.tmpl

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

  gotify:
    image: gotify/server
    ports:
      - 8080:80
    environment:
      - GOTIFY_DEFAULTUSER_PASS=custom
    volumes:
      - gotify_data:/app/data

volumes:
  app_data:
  gotify_data:
7 test/notifications/notifications.tmpl Normal file
@@ -0,0 +1,7 @@
{{ define "title_success" -}}
Successful test run with {{ env "EXTRA_VALUE" }}, yay!
{{- end }}

{{ define "body_success" -}}
Backing up {{ .Stats.BackupFile.FullPath }} succeeded.
{{- end }}
50 test/notifications/run.sh Executable file
@@ -0,0 +1,50 @@
#!/bin/sh

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

mkdir -p local

docker-compose up -d
sleep 5

GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')
info "Set up Gotify application using token $GOTIFY_TOKEN"

docker-compose exec backup backup

NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 0 ]; then
  fail "Expected no notifications to be sent when not configured"
fi
pass "No notifications were sent when not configured."

docker-compose down

NOTIFICATION_URLS="gotify://gotify/${GOTIFY_TOKEN}?disableTLS=true" docker-compose up -d

docker-compose exec backup backup

NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 1 ]; then
  fail "Expected one notifications to be sent when configured"
fi
pass "Correct number of notifications were sent when configured."

MESSAGE_TITLE=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].title')
MESSAGE_BODY=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].message')

if [ "$MESSAGE_TITLE" != "Successful test run with extra-value, yay!" ]; then
  fail "Unexpected notification title $MESSAGE_TITLE"
fi
pass "Custom notification title was used."

if [ "$MESSAGE_BODY" != "Backing up /tmp/test.tar.gz succeeded." ]; then
  fail "Unexpected notification body $MESSAGE_BODY"
fi
pass "Custom notification body was used."

docker-compose down --volumes
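NOTIFICATION_URLS takes shoutrrr-style service URLs; gotify://gotify/${GOTIFY_TOKEN}?disableTLS=true points at the gotify service on the compose network over plain HTTP. As a rough sketch, the resulting notification is equivalent to posting to Gotify's message endpoint directly (values taken from the template above; endpoint and parameters as per Gotify's documented API, stated here as an assumption):

    curl -X POST "http://localhost:8080/message?token=$GOTIFY_TOKEN" \
      -F title='Successful test run with extra-value, yay!' \
      -F message='Backing up /tmp/test.tar.gz succeeded.'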
1 test/ownership/.gitignore vendored Normal file
@@ -0,0 +1 @@
local
27 test/ownership/docker-compose.yml Normal file
@@ -0,0 +1,27 @@
version: '3'

services:
  db:
    image: postgres:14-alpine
    restart: unless-stopped
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - postgres_data:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=1FHJMSwt0yhIN1zS7I4DilGUhThBKq0x
      - POSTGRES_USER=test
      - POSTGRES_DB=test

  backup:
    image: offen/docker-volume-backup:${TEST_VERSION}
    restart: always
    environment:
      BACKUP_FILENAME: backup.tar.gz
    volumes:
      - postgres_data:/backup/postgres:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./local:/archive

volumes:
  postgres_data:
30 test/ownership/run.sh Normal file
@@ -0,0 +1,30 @@
#!/bin/sh
# This test refers to https://github.com/offen/docker-volume-backup/issues/71

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

mkdir -p local

docker-compose up -d
sleep 5

docker-compose exec backup backup

tmp_dir=$(mktemp -d)
sudo tar --same-owner -xvf ./local/backup.tar.gz -C $tmp_dir

sudo find $tmp_dir/backup/postgres > /dev/null
pass "Backup contains files at expected location"

for file in $(sudo find $tmp_dir/backup/postgres); do
  if [ "$(sudo stat -c '%u:%g' $file)" != "70:70" ]; then
    fail "Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
  fi
done
pass "All files and directories in backup preserved their ownership."

docker-compose down --volumes
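The hard-coded 70:70 reflects the alpine-based postgres image, where the postgres user and group both have id 70; extracting with sudo tar --same-owner is what lets the numeric ownership survive into the assertion. A quick out-of-band check, a sketch rather than part of the test:

    docker run --rm postgres:14-alpine id postgres
    # expected output starts with: uid=70(postgres) gid=70(postgres)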
test/s3/docker-compose.yml
@@ -10,10 +10,11 @@ services:
       MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
     entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
     volumes:
-      - backup_data:/data
+      - minio_backup_data:/data
 
-  backup: &default_backup_service
-    image: offen/docker-volume-backup:${TEST_VERSION}
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    hostname: hostnametoken
     depends_on:
       - minio
     restart: always
@@ -23,14 +24,13 @@ services:
       AWS_ENDPOINT: minio:9000
       AWS_ENDPOINT_PROTO: http
       AWS_S3_BUCKET_NAME: backup
-      BACKUP_FILENAME: test.tar.gz
+      BACKUP_FILENAME_EXPAND: 'true'
+      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
       BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
-      GPG_PASSPHRASE: 1234secret
     volumes:
-      - ./local:/archive
       - app_data:/backup/app_data:ro
       - /var/run/docker.sock:/var/run/docker.sock
 
@@ -42,5 +42,6 @@ services:
       - app_data:/var/opt/offen
 
 volumes:
-  backup_data:
+  minio_backup_data:
+    name: minio_backup_data
   app_data:
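The rename to minio_backup_data with an explicit name: matters for run.sh below: without it, compose prefixes volume names with the project name (something like s3_minio_backup_data, prefix assumed for illustration), and the bare docker run -v minio_backup_data:... mounts in the assertions would not resolve:

    # sketch: with the explicit name, the volume is reachable outside compose too
    docker run --rm -v minio_backup_data:/minio_data alpine ls /minio_data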
42 test/s3/run.sh Executable file
@@ -0,0 +1,42 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

docker-compose up -d
sleep 5

# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker-compose exec backup backup

sleep 5

expect_running_containers "3"

docker run --rm -it \
  -v minio_backup_data:/minio_data \
  alpine \
  ash -c 'tar -xvf /minio_data/backup/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'

pass "Found relevant files in untared remote backups."

# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5

docker-compose exec backup backup

docker run --rm -it \
  -v minio_backup_data:/minio_data \
  alpine \
  ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ]'

pass "Remote backups have not been deleted."

docker-compose down --volumes
47 test/ssh/docker-compose.yml Normal file
@@ -0,0 +1,47 @@
version: '3'

services:
  ssh:
    image: linuxserver/openssh-server:version-8.6_p1-r3
    environment:
      - PUID=1000
      - PGID=1000
      - USER_NAME=test
    volumes:
      - ./id_rsa.pub:/config/.ssh/authorized_keys
      - ssh_backup_data:/tmp

  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    hostname: hostnametoken
    depends_on:
      - ssh
    restart: always
    environment:
      BACKUP_FILENAME_EXPAND: 'true'
      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      SSH_HOST_NAME: ssh
      SSH_PORT: 2222
      SSH_USER: test
      SSH_REMOTE_PATH: /tmp
      SSH_IDENTITY_PASSPHRASE: test1234
    volumes:
      - ./id_rsa:/root/.ssh/id_rsa
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  ssh_backup_data:
    name: ssh_backup_data
  app_data:
43 test/ssh/run.sh Executable file
@@ -0,0 +1,43 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"

docker-compose up -d
sleep 5

docker-compose exec backup backup

sleep 5

expect_running_containers 3

docker run --rm -it \
  -v ssh_backup_data:/ssh_data \
  alpine \
  ash -c 'tar -xvf /ssh_data/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'

pass "Found relevant files in decrypted and untared remote backups."

# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5

docker-compose exec backup backup

docker run --rm -it \
  -v ssh_backup_data:/ssh_data \
  alpine \
  ash -c '[ $(find /ssh_data/ -type f | wc -l) = "1" ]'

pass "Remote backups have not been deleted."

docker-compose down --volumes
rm -f id_rsa id_rsa.pub
test/swarm/docker-compose.yml
@@ -18,8 +18,8 @@ services:
     volumes:
       - backup_data:/data
 
-  backup: &default_backup_service
-    image: offen/docker-volume-backup:${TEST_VERSION}
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
     depends_on:
       - minio
     deploy:
@@ -43,13 +43,15 @@ services:
     image: offen/offen:latest
     labels:
       - docker-volume-backup.stop-during-backup=true
+    healthcheck:
+      disable: true
     deploy:
       replicas: 2
       restart_policy:
         condition: on-failure
 
   pg:
-    image: postgres:12.2-alpine
+    image: postgres:14-alpine
     environment:
       POSTGRES_PASSWORD: example
     labels:
@@ -62,4 +64,5 @@ services:
 
 volumes:
   backup_data:
+    name: backup_data
   pg_data:
test/swarm/run.sh
@@ -3,13 +3,15 @@
 set -e
 
 cd $(dirname $0)
+. ../util.sh
+current_test=$(basename $(pwd))
 
 docker swarm init
 
 docker stack deploy --compose-file=docker-compose.yml test_stack
 
 while [ -z $(docker ps -q -f name=backup) ]; do
-  echo "[TEST:INFO] Backup container not ready yet. Retrying."
+  info "Backup container not ready yet. Retrying."
   sleep 1
 done
 
@@ -18,19 +20,13 @@ sleep 20
 docker exec $(docker ps -q -f name=backup) backup
 
 docker run --rm -it \
-  -v test_stack_backup_data:/data alpine \
+  -v backup_data:/data alpine \
   ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'
 
-echo "[TEST:PASS] Found relevant files in untared backup."
+pass "Found relevant files in untared backup."
 
-if [ "$(docker ps -q | wc -l)" != "5" ]; then
-  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
-  docker ps -a
-  exit 1
-fi
-
-echo "[TEST:PASS] All containers running post backup."
+sleep 5
+expect_running_containers "5"
 
 docker stack rm test_stack
 
 docker swarm leave --force
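These two diffs work together: run.sh can now mount -v backup_data:/data directly instead of the stack-prefixed test_stack_backup_data, because the compose file pins the volume name explicitly:

    volumes:
      backup_data:
        name: backup_data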
23 test/util.sh Normal file
@@ -0,0 +1,23 @@
#!/bin/sh

set -e

info () {
  echo "[test:${current_test:-none}:info] "$1""
}

pass () {
  echo "[test:${current_test:-none}:pass] "$1""
}

fail () {
  echo "[test:${current_test:-none}:fail] "$1""
  exit 1
}

expect_running_containers () {
  if [ "$(docker ps -q | wc -l)" != "$1" ]; then
    fail "Expected $1 containers to be running, instead seen: "$(docker ps -a | wc -l)""
  fi
  pass "$1 containers running."
}
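These helpers assume current_test is set by the caller, which is why every run.sh above starts with the same preamble:

    cd "$(dirname "$0")"
    . ../util.sh
    current_test=$(basename $(pwd))

    expect_running_containers "2"   # assertions then log under the test's name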
45 test/webdav/docker-compose.yml Normal file
@@ -0,0 +1,45 @@
version: '3'

services:
  webdav:
    image: bytemark/webdav:2.4
    environment:
      AUTH_TYPE: Digest
      USERNAME: test
      PASSWORD: test
    volumes:
      - webdav_backup_data:/var/lib/dav

  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    hostname: hostnametoken
    depends_on:
      - webdav
    restart: always
    environment:
      BACKUP_FILENAME_EXPAND: 'true'
      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      WEBDAV_URL: http://webdav/
      WEBDAV_URL_INSECURE: 'true'
      WEBDAV_PATH: /my/new/path/
      WEBDAV_USERNAME: test
      WEBDAV_PASSWORD: test
    volumes:
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  webdav_backup_data:
    name: webdav_backup_data
  app_data:
40 test/webdav/run.sh Executable file
@@ -0,0 +1,40 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

docker-compose up -d
sleep 5

docker-compose exec backup backup

sleep 5

expect_running_containers "3"

docker run --rm -it \
  -v webdav_backup_data:/webdav_data \
  alpine \
  ash -c 'tar -xvf /webdav_data/data/my/new/path/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'

pass "Found relevant files in untared remote backup."

# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker-compose up -d
sleep 5

docker-compose exec backup backup

docker run --rm -it \
  -v webdav_backup_data:/webdav_data \
  alpine \
  ash -c '[ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'

pass "Remote backups have not been deleted."

docker-compose down --volumes