Compare commits


25 Commits

| Author | SHA1 | Message | Date |
|-|-|-|-|
| Frederik Ring | 87ea8d0930 | Improve logging | 2024-01-29 16:20:50 +01:00 |
| Frederik Ring | 7d489a95e3 | Rename and deprecate BACKUP_STOP_CONTAINER_LABEL | 2024-01-29 16:16:44 +01:00 |
| Frederik Ring | 57e7f2af9e | Reflect changes in naming | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 4639b21f3b | Choose better filename | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 9acd6dc8ab | Timeout when scaling down services should be configurable | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 409496af24 | Timer is more suitable for timeout race | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 542d1fa69f | Inline handling of in-swarm container level restart | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 2bc94d8a5b | Time out after five minutes of not reaching desired container count | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 26bbc66cd5 | Factor out code for service updating | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 09cc1f5c60 | Move docker interaction code into own file | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 7ad6fc9355 | Scale services concurrently | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | bb37b8b1d8 | Add additional check if all containers have been removed | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | bf1d13b78c | Document script behavior on label collision | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 538a069a70 | Check whether container and service labels collide | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 78a89c1a93 | Log warnings from Docker when updating services | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 94aa33369f | Do not rely on PreviousSpec for storing desired replica count | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | f4497177b5 | Document services stats | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 95e9e9945d | Downgrade Docker CLI to match client | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | fee8cb234c | Document scale-up/down approach in docs | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | b7855605d4 | Clean up error and log messages | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | f14b796aab | In test, label both services | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 978e900308 | Use progress tool from Docker CLI | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 511b79bd43 | Scale services back up | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 8ef7fa0d5d | Try scaling down services | 2024-01-28 20:29:08 +01:00 |
| Frederik Ring | 270ca65efa | Query for labeled services as well | 2024-01-28 20:29:08 +01:00 |
18 changed files with 142 additions and 574 deletions


@@ -16,5 +16,6 @@ WORKDIR /root
RUN apk add --no-cache ca-certificates
COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
COPY --chmod=755 ./entrypoint.sh /root/
ENTRYPOINT ["/usr/bin/backup", "-foreground"]
ENTRYPOINT ["/root/entrypoint.sh"]


@@ -34,7 +34,6 @@ type Config struct {
BackupFilenameExpand bool `split_words:"true"`
BackupLatestSymlink string `split_words:"true"`
BackupArchive string `split_words:"true" default:"/archive"`
BackupCronExpression string `split_words:"true" default:"@daily"`
BackupRetentionDays int32 `split_words:"true" default:"-1"`
BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
BackupPruningPrefix string `split_words:"true"`
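
As a side note on the `envconfig` tags visible in this struct: `split_words:"true"` maps a field such as `BackupRetentionDays` to the environment variable `BACKUP_RETENTION_DAYS`, and `default` supplies a fallback when the variable is unset. A minimal, self-contained sketch using the same library; the `demoConfig` struct below is illustrative and not part of the repository:

```go
package main

import (
	"fmt"
	"time"

	"github.com/offen/envconfig"
)

type demoConfig struct {
	BackupArchive       string        `split_words:"true" default:"/archive"`
	BackupRetentionDays int32         `split_words:"true" default:"-1"`
	BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
}

func main() {
	// With split_words, BackupRetentionDays is read from BACKUP_RETENTION_DAYS;
	// unset variables fall back to the value given in the default tag.
	var c demoConfig
	if err := envconfig.Process("", &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)
}
```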


@@ -1,82 +0,0 @@
// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"fmt"
"os"
"path/filepath"
"github.com/joho/godotenv"
"github.com/offen/envconfig"
)
// envProxy is a function that mimics os.LookupEnv but can read values from any other source
type envProxy func(string) (string, bool)
func loadConfig(lookup envProxy) (*Config, error) {
envconfig.Lookup = func(key string) (string, bool) {
value, okValue := lookup(key)
location, okFile := lookup(key + "_FILE")
switch {
case okValue && !okFile: // only value
return value, true
case !okValue && okFile: // only file
contents, err := os.ReadFile(location)
if err != nil {
return "", false
}
return string(contents), true
case okValue && okFile: // both
return "", false
default: // neither, ignore
return "", false
}
}
var c = &Config{}
if err := envconfig.Process("", c); err != nil {
return nil, fmt.Errorf("loadConfig: failed to process configuration values: %w", err)
}
return c, nil
}
func loadEnvVars() (*Config, error) {
return loadConfig(os.LookupEnv)
}
func loadEnvFiles(directory string) ([]*Config, error) {
items, err := os.ReadDir(directory)
if err != nil {
if os.IsNotExist(err) {
return nil, err
}
return nil, fmt.Errorf("loadEnvFiles: failed to read files from env directory: %w", err)
}
var cs = make([]*Config, 0)
for _, item := range items {
if item.IsDir() {
continue
}
p := filepath.Join(directory, item.Name())
envFile, err := godotenv.Read(p)
if err != nil {
return nil, fmt.Errorf("loadEnvFiles: error reading config file %s: %w", p, err)
}
lookup := func(key string) (string, bool) {
val, ok := envFile[key]
return val, ok
}
c, err := loadConfig(lookup)
if err != nil {
return nil, fmt.Errorf("loadEnvFiles: error loading config from file %s: %w", p, err)
}
cs = append(cs, c)
}
return cs, nil
}
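
For context, the convention this (now removed) loader implemented, and which resurfaces inside `newScript` further down in this diff, is that every configuration key can be provided either as `KEY` or as `KEY_FILE` pointing at a file containing the value, but never both. A standalone sketch of that precedence logic follows; the `resolveSecret` helper is illustrative and not code from the repository:

```go
package main

import (
	"fmt"
	"os"
)

// resolveSecret returns the value for key, reading it from the file named in
// key+"_FILE" when only the file variant is set. Setting both is an error.
func resolveSecret(key string) (string, error) {
	value, okValue := os.LookupEnv(key)
	location, okFile := os.LookupEnv(key + "_FILE")
	switch {
	case okValue && okFile:
		return "", fmt.Errorf("both %s and %s_FILE are set", key, key)
	case okFile:
		contents, err := os.ReadFile(location)
		if err != nil {
			return "", fmt.Errorf("reading %s: %w", location, err)
		}
		return string(contents), nil
	case okValue:
		return value, nil
	default:
		return "", nil // neither is set; defaults apply elsewhere
	}
}

func main() {
	// Hypothetical key, used only to exercise the helper.
	secret, err := resolveSecret("AWS_SECRET_ACCESS_KEY")
	if err != nil {
		panic(err)
	}
	fmt.Println("resolved a secret of length", len(secret))
}
```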


@@ -188,18 +188,13 @@ func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func(
if s.cli == nil {
return cb
}
return func() (err error) {
if err = s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
err = fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
return
return func() error {
if err := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
return fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
}
defer func() {
derr := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step))
if err == nil && derr != nil {
err = derr
}
s.must(s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)))
}()
err = cb()
return
return cb()
}
}


@@ -4,218 +4,63 @@
package main
import (
"flag"
"fmt"
"log/slog"
"os"
"os/signal"
"syscall"
"github.com/robfig/cron/v3"
)
type command struct {
logger *slog.Logger
}
func newCommand() *command {
return &command{
logger: slog.New(slog.NewTextHandler(os.Stdout, nil)),
}
}
func (c *command) must(err error) {
func main() {
s, err := newScript()
if err != nil {
c.logger.Error(
fmt.Sprintf("Fatal error running command: %v", err),
"error",
err,
)
os.Exit(1)
panic(err)
}
}
func runScript(c *Config) (err error) {
unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
defer s.must(unlock())
s.must(err)
defer func() {
if derr := recover(); derr != nil {
err = fmt.Errorf("runScript: unexpected panic running script: %v", err)
}
}()
s, err := newScript(c)
if err != nil {
err = fmt.Errorf("runScript: error instantiating script: %w", err)
return
}
runErr := func() (err error) {
unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
if err != nil {
err = fmt.Errorf("runScript: error acquiring file lock: %w", err)
return
}
defer func() {
derr := unlock()
if err == nil && derr != nil {
err = fmt.Errorf("runScript: error releasing file lock: %w", derr)
}
}()
scriptErr := func() error {
if err := s.withLabeledCommands(lifecyclePhaseArchive, func() (err error) {
restartContainersAndServices, err := s.stopContainersAndServices()
// The mechanism for restarting containers is not using hooks as it
// should happen as soon as possible (i.e. before uploading backups or
// similar).
defer func() {
derr := restartContainersAndServices()
if err == nil {
err = derr
}
}()
if err != nil {
return
}
err = s.createArchive()
return
})(); err != nil {
return err
}
if err := s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)(); err != nil {
return err
}
if err := s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)(); err != nil {
return err
}
if err := s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)(); err != nil {
return err
}
return nil
}()
if hookErr := s.runHooks(scriptErr); hookErr != nil {
if scriptErr != nil {
return fmt.Errorf(
"runScript: error %w executing the script followed by %w calling the registered hooks",
scriptErr,
hookErr,
if pArg := recover(); pArg != nil {
if err, ok := pArg.(error); ok {
s.logger.Error(
fmt.Sprintf("Executing the script encountered a panic: %v", err),
)
if hookErr := s.runHooks(err); hookErr != nil {
s.logger.Error(
fmt.Sprintf("An error occurred calling the registered hooks: %s", hookErr),
)
}
os.Exit(1)
}
return fmt.Errorf(
"runScript: the script ran successfully, but an error occurred calling the registered hooks: %w",
hookErr,
)
panic(pArg)
}
if scriptErr != nil {
return fmt.Errorf("runScript: error running script: %w", scriptErr)
}
return nil
}()
if runErr != nil {
s.logger.Error(
fmt.Sprintf("Script run failed: %v", runErr), "error", runErr,
)
}
return runErr
}
func (c *command) runInForeground() error {
cr := cron.New(
cron.WithParser(
cron.NewParser(
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
),
),
)
addJob := func(config *Config) error {
if _, err := cr.AddFunc(config.BackupCronExpression, func() {
c.logger.Info(
if err := s.runHooks(nil); err != nil {
s.logger.Error(
fmt.Sprintf(
"Now running schedule %s",
config.BackupCronExpression,
"Backup procedure ran successfully, but an error ocurred calling the registered hooks: %v",
err,
),
)
if err := runScript(config); err != nil {
c.logger.Error(
fmt.Sprintf(
"Unexpected error running schedule %s: %v",
config.BackupCronExpression,
err,
),
"error",
err,
)
}
}); err != nil {
return fmt.Errorf("addJob: error adding schedule %s: %w", config.BackupCronExpression, err)
os.Exit(1)
}
c.logger.Info(fmt.Sprintf("Successfully scheduled backup with expression %s", config.BackupCronExpression))
return nil
}
s.logger.Info("Finished running backup tasks.")
}()
cs, err := loadEnvFiles("/etc/dockervolumebackup/conf.d")
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("runInForeground: could not load config from environment files: %w", err)
}
c, err := loadEnvVars()
s.must(s.withLabeledCommands(lifecyclePhaseArchive, func() error {
restartContainersAndServices, err := s.stopContainersAndServices()
// The mechanism for restarting containers is not using hooks as it
// should happen as soon as possible (i.e. before uploading backups or
// similar).
defer func() {
s.must(restartContainersAndServices())
}()
if err != nil {
return fmt.Errorf("runInForeground: could not load config from environment variables: %w", err)
} else {
err = addJob(c)
if err != nil {
return fmt.Errorf("runInForeground: error adding job from env: %w", err)
}
return err
}
} else {
c.logger.Info("/etc/dockervolumebackup/conf.d was found, using configuration files from this directory.")
for _, config := range cs {
err = addJob(config)
if err != nil {
return fmt.Errorf("runInForeground: error adding jobs from conf files: %w", err)
}
c.logger.Info(
fmt.Sprintf("Successfully scheduled backup with expression %s", config.BackupCronExpression),
)
}
}
return s.createArchive()
})())
var quit = make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT)
cr.Start()
<-quit
ctx := cr.Stop()
<-ctx.Done()
return nil
}
func (c *command) runAsCommand() error {
config, err := loadEnvVars()
if err != nil {
return fmt.Errorf("runAsCommand: error loading env vars: %w", err)
}
err = runScript(config)
if err != nil {
return fmt.Errorf("runAsCommand: error running script: %w", err)
}
return nil
}
func main() {
foreground := flag.Bool("foreground", false, "run the tool in the foreground")
flag.Parse()
c := newCommand()
if *foreground {
c.must(c.runInForeground())
} else {
c.must(c.runAsCommand())
}
s.must(s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)())
s.must(s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)())
s.must(s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)())
}
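
The rewritten `main` above replaces the explicit error plumbing of `runScript` with a panic-based flow: `s.must` panics on any fatal error, and a single deferred `recover` logs the failure, notifies the registered hooks, and exits non-zero, while the success path still calls the hooks with a `nil` error. Below is a condensed, self-contained sketch of that control flow; `doStep` and `runHooks` are illustrative stubs, not the repository's implementations:

```go
package main

import (
	"fmt"
	"log/slog"
	"os"
)

// must aborts the current run by panicking, so one deferred recover in main
// can take care of logging, hooks and the exit code.
func must(logger *slog.Logger, err error) {
	if err != nil {
		logger.Error(fmt.Sprintf("Fatal error running backup: %v", err))
		panic(err)
	}
}

// runHooks stands in for the notification hooks; illustrative stub only.
func runHooks(logger *slog.Logger, scriptErr error) error {
	logger.Info(fmt.Sprintf("hooks called with error: %v", scriptErr))
	return nil
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))
	defer func() {
		if pArg := recover(); pArg != nil {
			if err, ok := pArg.(error); ok {
				// Controlled failure: notify hooks, then exit non-zero.
				if hookErr := runHooks(logger, err); hookErr != nil {
					logger.Error(fmt.Sprintf("error calling hooks: %v", hookErr))
				}
				os.Exit(1)
			}
			panic(pArg) // not an error value: re-panic
		}
		// Success path: hooks still run, but with a nil error.
		if err := runHooks(logger, nil); err != nil {
			logger.Error(fmt.Sprintf("error calling hooks: %v", err))
			os.Exit(1)
		}
	}()

	// Each step aborts via panic on failure, mirroring s.must(...) above.
	must(logger, doStep("create archive"))
	must(logger, doStep("encrypt archive"))
	must(logger, doStep("copy archive"))
}

func doStep(name string) error {
	fmt.Println("running:", name)
	return nil
}
```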


@@ -30,6 +30,7 @@ import (
"github.com/containrrr/shoutrrr/pkg/router"
"github.com/docker/docker/client"
"github.com/leekchan/timeutil"
"github.com/offen/envconfig"
"github.com/otiai10/copy"
"golang.org/x/sync/errgroup"
)
@@ -57,10 +58,10 @@ type script struct {
// remote resources like the Docker engine or remote storage locations. All
// reading from env vars or other configuration sources is expected to happen
// in this method.
func newScript(c *Config) (*script, error) {
func newScript() (*script, error) {
stdOut, logBuffer := buffer(os.Stdout)
s := &script{
c: c,
c: &Config{},
logger: slog.New(slog.NewTextHandler(stdOut, nil)),
stats: &Stats{
StartTime: time.Now(),
@@ -82,6 +83,32 @@ func newScript(c *Config) (*script, error) {
return nil
})
envconfig.Lookup = func(key string) (string, bool) {
value, okValue := os.LookupEnv(key)
location, okFile := os.LookupEnv(key + "_FILE")
switch {
case okValue && !okFile: // only value
return value, true
case !okValue && okFile: // only file
contents, err := os.ReadFile(location)
if err != nil {
s.must(fmt.Errorf("newScript: failed to read %s! Error: %s", location, err))
return "", false
}
return string(contents), true
case okValue && okFile: // both
s.must(fmt.Errorf("newScript: both %s and %s are set!", key, key+"_FILE"))
return "", false
default: // neither, ignore
return "", false
}
}
if err := envconfig.Process("", s.c); err != nil {
return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
}
s.file = path.Join("/tmp", s.c.BackupFilename)
tmplFileName, tErr := template.New("extension").Parse(s.file)
@@ -480,6 +507,17 @@ func (s *script) pruneBackups() error {
return nil
}
// must exits the script run prematurely in case the given error
// is non-nil.
func (s *script) must(err error) {
if err != nil {
s.logger.Error(
fmt.Sprintf("Fatal error running backup: %s", err),
)
panic(err)
}
}
// skipPrune returns true if the given backend name is contained in the
// list of skipped backends.
func skipPrune(name string, skippedBackends []string) bool {


@@ -170,23 +170,26 @@ func (s *script) stopContainersAndServices() (func() error, error) {
}
}
s.logger.Info(
fmt.Sprintf(
"Stopping %d out of %d running container(s) as they were labeled %s.",
len(containersToStop),
len(allContainers),
filterMatchLabel,
),
)
if isDockerSwarm {
s.logger.Info(
fmt.Sprintf(
"Scaling down %d out of %d active service(s) as they were labeled %s.",
"Stopping %d out of %d running container(s) and scaling down %d out of %d active service(s) as they were labeled %s.",
len(containersToStop),
len(allContainers),
len(servicesToScaleDown),
len(allServices),
filterMatchLabel,
),
)
} else {
s.logger.Info(
fmt.Sprintf(
"Stopping %d out of %d running container(s) as they were labeled %s.",
len(containersToStop),
len(allContainers),
filterMatchLabel,
),
)
}
var stoppedContainers []types.Container
@@ -210,9 +213,9 @@ func (s *script) stopContainersAndServices() (func() error, error) {
warnings, err := scaleService(s.cli, svc.serviceID, 0)
if err != nil {
scaleDownErrors.append(err)
return
} else {
scaledDownServices = append(scaledDownServices, svc)
}
scaledDownServices = append(scaledDownServices, svc)
for _, warning := range warnings {
s.logger.Warn(
fmt.Sprintf("The Docker API returned a warning when scaling down service %s: %s", svc.serviceID, warning),
@@ -317,20 +320,21 @@ func (s *script) stopContainersAndServices() (func() error, error) {
errors.Join(allErrors...),
)
}
s.logger.Info(
fmt.Sprintf(
"Restarted %d container(s).",
len(stoppedContainers),
),
)
if isDockerSwarm {
s.logger.Info(
fmt.Sprintf(
"Scaled %d service(s) back up.",
"Restarted %d container(s) and %d service(s).",
len(stoppedContainers),
len(scaledDownServices),
),
)
} else {
s.logger.Info(
fmt.Sprintf(
"Restarted %d container(s).",
len(stoppedContainers),
),
)
}
return nil
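
`scaleService` itself is not part of this hunk, but the calls it wraps are ordinary Docker SDK service updates: inspect the service, set the replica count on its spec, and submit the spec together with the current version so concurrent changes are not clobbered. A hedged sketch of such a helper; the function name `scaleTo` and the service name used in `main` are made up for illustration:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// scaleTo updates a replicated Swarm service to the given replica count and
// returns any warnings emitted by the Docker API.
func scaleTo(ctx context.Context, cli *client.Client, serviceID string, replicas uint64) ([]string, error) {
	service, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	if err != nil {
		return nil, fmt.Errorf("inspecting service %s: %w", serviceID, err)
	}
	if service.Spec.Mode.Replicated == nil {
		return nil, fmt.Errorf("service %s is not in replicated mode", serviceID)
	}
	service.Spec.Mode.Replicated.Replicas = &replicas
	// Passing the current version prevents clobbering concurrent updates.
	response, err := cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
	if err != nil {
		return nil, fmt.Errorf("updating service %s: %w", serviceID, err)
	}
	return response.Warnings, nil
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	warnings, err := scaleTo(context.Background(), cli, "my_stack_pg", 0) // hypothetical service name
	if err != nil {
		panic(err)
	}
	for _, w := range warnings {
		fmt.Println("warning:", w)
	}
}
```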


@@ -13,33 +13,5 @@ If you are interfacing with Docker via TCP, set `DOCKER_HOST` to the correct URL
DOCKER_HOST=tcp://docker_socket_proxy:2375
```
If you do this as you seek to restrict access to the Docker socket, this tool is potentially calling the following Docker APIs:
In case you are using a socket proxy, it must support `GET` and `POST` requests to the `/containers` endpoint. If you are using Docker Swarm, it must also support the `/services` endpoint. If you are using pre/post backup commands, it must also support the `/exec` endpoint.
| API | When |
|-|-|
| `Info` | always |
| `ContainerExecCreate` | running commands from `exec-labels` |
| `ContainerExecAttach` | running commands from `exec-labels` |
| `ContainerExecInspect` | running commands from `exec-labels` |
| `ContainerList` | always |
| `ServiceList` | Docker engine is running in Swarm mode |
| `ServiceInspect` | Docker engine is running in Swarm mode |
| `ServiceUpdate` | Docker engine is running in Swarm mode and `stop-during-backup` is used |
| `ContainerStop` | `stop-during-backup` labels are applied to containers |
| `ContainerStart` | `stop-during-backup` labels are applied to containers |
---
In case you are using [`docker-socket-proxy`][proxy], this means the following permissions are required:
| Permission | When |
|-|-|
| INFO | always required |
| CONTAINERS | always required |
| POST | required when using `stop-during-backup` or `exec` labels |
| EXEC | required when using `exec`-labeled commands |
| SERVICES | required when Docker Engine is running in Swarm mode |
| NODES | required when labeling services `stop-during-backup` |
| TASKS | required when labeling services `stop-during-backup` |
[proxy]: https://github.com/Tecnativa/docker-socket-proxy
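
To make the `/containers` requirement above concrete: picking the containers labeled `docker-volume-backup.stop-during-backup=true` is a plain `ContainerList` call with a label filter, which is why container access is always needed. A minimal sketch using the Docker SDK, assuming the socket or `DOCKER_HOST` is reachable and reducing error handling to panics for brevity:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// GET /containers/json with a label filter.
	labeled, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
		Filters: filters.NewArgs(
			filters.Arg("label", "docker-volume-backup.stop-during-backup=true"),
		),
	})
	if err != nil {
		panic(err)
	}
	for _, c := range labeled {
		fmt.Printf("would stop %s (%s) during backup\n", c.Names, c.ID[:12])
	}
}
```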


@@ -23,22 +23,9 @@ You can populate below template according to your requirements and use it as you
```
########### BACKUP SCHEDULE
# A cron expression represents a set of times, using 5 or 6 space-separated fields.
#
# Field name | Mandatory? | Allowed values | Allowed special characters
# ---------- | ---------- | -------------- | --------------------------
# Seconds | No | 0-59 | * / , -
# Minutes | Yes | 0-59 | * / , -
# Hours | Yes | 0-23 | * / , -
# Day of month | Yes | 1-31 | * / , - ?
# Month | Yes | 1-12 or JAN-DEC | * / , -
# Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
#
# Month and Day-of-week field values are case insensitive.
# "SUN", "Sun", and "sun" are equally accepted.
# If no value is set, `@daily` will be used.
# If you do not want the cron to ever run, use `0 0 5 31 2 ?`.
# Backups run on the given cron schedule in `busybox` flavor. If no
# value is set, `@daily` will be used. If you do not want the cron
# to ever run, use `0 0 5 31 2 ?`.
# BACKUP_CRON_EXPRESSION="0 2 * * *"

entrypoint.sh (new file)

@@ -0,0 +1,26 @@
#!/bin/sh
# Copyright 2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: MPL-2.0
set -e
if [ ! -d "/etc/dockervolumebackup/conf.d" ]; then
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
else
echo "/etc/dockervolumebackup/conf.d was found, using configuration files from this directory."
crontab -r && crontab /dev/null
for file in /etc/dockervolumebackup/conf.d/*; do
source $file
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
echo "Appending cron.d entry with expression $BACKUP_CRON_EXPRESSION and configuration file $file"
(crontab -l; echo "$BACKUP_CRON_EXPRESSION /bin/sh -c 'set -a; source $file; set +a && backup' 2>&1") | crontab -
done
fi
echo "Starting cron in foreground."
crond -f -d 8

go.mod

@@ -10,14 +10,12 @@ require (
github.com/docker/cli v24.0.1+incompatible
github.com/docker/docker v24.0.7+incompatible
github.com/gofrs/flock v0.8.1
github.com/joho/godotenv v1.5.1
github.com/klauspost/compress v1.17.6
github.com/klauspost/compress v1.17.4
github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
github.com/minio/minio-go/v7 v7.0.66
github.com/offen/envconfig v1.5.0
github.com/otiai10/copy v1.14.0
github.com/pkg/sftp v1.13.6
github.com/robfig/cron/v3 v3.0.0
github.com/studio-b12/gowebdav v0.9.0
golang.org/x/crypto v0.18.0
golang.org/x/oauth2 v0.16.0

go.sum

@@ -443,8 +443,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jarcoal/httpmock v1.2.0 h1:gSvTxxFR/MEMfsGrvRbdfpRUMBStovlSRLw0Ep1bwwc=
github.com/jarcoal/httpmock v1.2.0/go.mod h1:oCoTsnAz4+UoOUIf5lJOWV2QQIW5UoeUI6aM2YnWAZk=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -458,8 +456,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -595,8 +593,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/robfig/cron/v3 v3.0.0 h1:kQ6Cb7aHOHTSzNVNEhmp8EcWKLb4CbiMW9h9VyIhO4E=
github.com/robfig/cron/v3 v3.0.0/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=


@@ -13,8 +13,6 @@ docker compose up -d --quiet-pull
# sleep until a backup is guaranteed to have happened on the 1 minute schedule
sleep 100
docker compose logs backup
if [ ! -f "$LOCAL_DIR/conf.tar.gz" ]; then
fail "Config from file was not used."
fi


@@ -1,23 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
environment:
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: '7'
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ${LOCAL_DIR:-./local}:/archive
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:


@@ -1,34 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
export LOCAL_DIR=$(mktemp -d)
docker compose up -d --quiet-pull
sleep 5
ec=0
docker compose exec -e BACKUP_RETENTION_DAYS=7 -e BACKUP_FILENAME=test.tar.gz backup backup & \
{ set +e; sleep 0.1; docker compose exec -e BACKUP_FILENAME=test2.tar.gz -e LOCK_TIMEOUT=1s backup backup; ec=$?;}
if [ "$ec" = "0" ]; then
fail "Subsequent invocation exited 0"
fi
pass "Subsequent invocation did not exit 0"
sleep 5
if [ ! -f "${LOCAL_DIR}/test.tar.gz" ]; then
fail "Could not find expected tar file"
fi
pass "Found expected tar file"
if [ -f "${LOCAL_DIR}/test2.tar.gz" ]; then
fail "Subsequent invocation was expected to fail but created archive"
fi
pass "Subsequent invocation did not create archive"


@@ -1,40 +0,0 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense
version: '3.8'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
DOCKER_HOST: tcp://docker_socket_proxy:2375
volumes:
- pg_data:/backup/pg_data:ro
- ${LOCAL_DIR:-local}:/archive
docker_socket_proxy:
image: tecnativa/docker-socket-proxy:0.1
environment:
INFO: ${ALLOW_INFO:-1}
CONTAINERS: ${ALLOW_CONTAINERS:-1}
SERVICES: ${ALLOW_SERVICES:-1}
POST: ${ALLOW_POST:-1}
TASKS: ${ALLOW_TASKS:-1}
NODES: ${ALLOW_NODES:-1}
volumes:
- /var/run/docker.sock:/var/run/docker.sock
pg:
image: postgres:14-alpine
environment:
POSTGRES_PASSWORD: example
volumes:
- pg_data:/var/lib/postgresql/data
deploy:
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
pg_data:


@@ -1,36 +0,0 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense
version: '3.8'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
DOCKER_HOST: tcp://docker_socket_proxy:2375
volumes:
- pg_data:/backup/pg_data:ro
- ${LOCAL_DIR:-local}:/archive
docker_socket_proxy:
image: tecnativa/docker-socket-proxy:0.1
environment:
INFO: ${ALLOW_INFO:-1}
CONTAINERS: ${ALLOW_CONTAINERS:-1}
POST: ${ALLOW_POST:-1}
volumes:
- /var/run/docker.sock:/var/run/docker.sock
pg:
image: postgres:14-alpine
environment:
POSTGRES_PASSWORD: example
volumes:
- pg_data:/var/lib/postgresql/data
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
pg_data:


@@ -1,76 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
export LOCAL_DIR=$(mktemp -d)
docker compose up -d --quiet-pull
sleep 5
# The default configuration in docker-compose.yml should
# successfully create a backup.
docker compose exec backup backup
sleep 5
expect_running_containers "3"
if [ ! -f "$LOCAL_DIR/test.tar.gz" ]; then
fail "Archive was not created"
fi
pass "Found relevant archive file."
# Disabling POST should make the backup run fail
ALLOW_POST="0" docker compose up -d
sleep 5
set +e
docker compose exec backup backup
if [ $? = "0" ]; then
fail "Expected invocation to exit non-zero."
fi
set -e
pass "Invocation exited non-zero."
docker compose down --volumes
# Next, the test is run against a Swarm setup
docker swarm init
export LOCAL_DIR=$(mktemp -d)
docker stack deploy --compose-file=docker-compose.swarm.yml test_stack
sleep 20
# The default configuration in docker-compose.swarm.yml should
# successfully create a backup in Swarm mode.
docker exec $(docker ps -q -f name=backup) backup
if [ ! -f "$LOCAL_DIR/test.tar.gz" ]; then
fail "Archive was not created"
fi
pass "Found relevant archive file."
sleep 5
expect_running_containers "3"
# Disabling POST should make the backup run fail
ALLOW_POST="0" docker stack deploy --compose-file=docker-compose.swarm.yml test_stack
sleep 20
set +e
docker exec $(docker ps -q -f name=backup) backup
if [ $? = "0" ]; then
fail "Expected invocation to exit non-zero."
fi
set -e
pass "Invocation exited non-zero."