mirror of
https://github.com/offen/docker-volume-backup.git
synced 2025-12-05 17:18:02 +01:00
Compare commits
8 Commits
v2.36.0
...
v2.37.0-al
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7cfea62933 | ||
|
|
d14e826529 | ||
|
|
21191d601a | ||
|
|
d642a60c4d | ||
|
|
64d934102d | ||
|
|
0f224e4fb8 | ||
|
|
6029225f74 | ||
|
|
63b545787e |
@@ -16,6 +16,5 @@ WORKDIR /root
|
|||||||
RUN apk add --no-cache ca-certificates
|
RUN apk add --no-cache ca-certificates
|
||||||
|
|
||||||
COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
|
COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
|
||||||
COPY --chmod=755 ./entrypoint.sh /root/
|
|
||||||
|
|
||||||
ENTRYPOINT ["/root/entrypoint.sh"]
|
ENTRYPOINT ["/usr/bin/backup", "-foreground"]
|
||||||
|
|||||||
@@ -34,6 +34,7 @@ type Config struct {
|
|||||||
BackupFilenameExpand bool `split_words:"true"`
|
BackupFilenameExpand bool `split_words:"true"`
|
||||||
BackupLatestSymlink string `split_words:"true"`
|
BackupLatestSymlink string `split_words:"true"`
|
||||||
BackupArchive string `split_words:"true" default:"/archive"`
|
BackupArchive string `split_words:"true" default:"/archive"`
|
||||||
|
BackupCronExpression string `split_words:"true" default:"@daily"`
|
||||||
BackupRetentionDays int32 `split_words:"true" default:"-1"`
|
BackupRetentionDays int32 `split_words:"true" default:"-1"`
|
||||||
BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
|
BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
|
||||||
BackupPruningPrefix string `split_words:"true"`
|
BackupPruningPrefix string `split_words:"true"`
|
||||||
|
|||||||
87
cmd/backup/config_provider.go
Normal file
87
cmd/backup/config_provider.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/joho/godotenv"
|
||||||
|
"github.com/offen/envconfig"
|
||||||
|
)
|
||||||
|
|
||||||
|
// envProxy is a function that mimics os.LookupEnv but can read values from any other source
|
||||||
|
type envProxy func(string) (string, bool)
|
||||||
|
|
||||||
|
func loadConfig(lookup envProxy) (*Config, error) {
|
||||||
|
envconfig.Lookup = func(key string) (string, bool) {
|
||||||
|
value, okValue := lookup(key)
|
||||||
|
location, okFile := lookup(key + "_FILE")
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case okValue && !okFile: // only value
|
||||||
|
return value, true
|
||||||
|
case !okValue && okFile: // only file
|
||||||
|
contents, err := os.ReadFile(location)
|
||||||
|
if err != nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
return string(contents), true
|
||||||
|
case okValue && okFile: // both
|
||||||
|
return "", false
|
||||||
|
default: // neither, ignore
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var c = &Config{}
|
||||||
|
if err := envconfig.Process("", c); err != nil {
|
||||||
|
return nil, fmt.Errorf("loadConfig: failed to process configuration values: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadEnvVars() (*Config, error) {
|
||||||
|
return loadConfig(os.LookupEnv)
|
||||||
|
}
|
||||||
|
|
||||||
|
type configFile struct {
|
||||||
|
name string
|
||||||
|
config *Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func loadEnvFiles(directory string) ([]configFile, error) {
|
||||||
|
items, err := os.ReadDir(directory)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("loadEnvFiles: failed to read files from env directory: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cs := []configFile{}
|
||||||
|
for _, item := range items {
|
||||||
|
if item.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := filepath.Join(directory, item.Name())
|
||||||
|
envFile, err := godotenv.Read(p)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("loadEnvFiles: error reading config file %s: %w", p, err)
|
||||||
|
}
|
||||||
|
lookup := func(key string) (string, bool) {
|
||||||
|
val, ok := envFile[key]
|
||||||
|
return val, ok
|
||||||
|
}
|
||||||
|
c, err := loadConfig(lookup)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("loadEnvFiles: error loading config from file %s: %w", p, err)
|
||||||
|
}
|
||||||
|
cs = append(cs, configFile{config: c, name: item.Name()})
|
||||||
|
}
|
||||||
|
|
||||||
|
return cs, nil
|
||||||
|
}
|
||||||
29
cmd/backup/cron.go
Normal file
29
cmd/backup/cron.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
// Copyright 2024 - Offen Authors <hioffen@posteo.de>
|
||||||
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/robfig/cron/v3"
|
||||||
|
)
|
||||||
|
|
||||||
|
// checkCronSchedule detects whether the given cron expression will actually
|
||||||
|
// ever be executed or not.
|
||||||
|
func checkCronSchedule(expression string) (ok bool) {
|
||||||
|
defer func() {
|
||||||
|
if err := recover(); err != nil {
|
||||||
|
ok = false
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
sched, err := cron.ParseStandard(expression)
|
||||||
|
if err != nil {
|
||||||
|
ok = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
now := time.Now()
|
||||||
|
sched.Next(now) // panics when the cron would never run
|
||||||
|
ok = true
|
||||||
|
return
|
||||||
|
}
|
||||||
@@ -188,13 +188,18 @@ func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func(
|
|||||||
if s.cli == nil {
|
if s.cli == nil {
|
||||||
return cb
|
return cb
|
||||||
}
|
}
|
||||||
return func() error {
|
return func() (err error) {
|
||||||
if err := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
|
if err = s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
|
||||||
return fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
|
err = fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
s.must(s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)))
|
derr := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step))
|
||||||
|
if err == nil && derr != nil {
|
||||||
|
err = derr
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
return cb()
|
err = cb()
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,63 +4,222 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/robfig/cron/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
type command struct {
|
||||||
s, err := newScript()
|
logger *slog.Logger
|
||||||
if err != nil {
|
}
|
||||||
panic(err)
|
|
||||||
|
func newCommand() *command {
|
||||||
|
return &command{
|
||||||
|
logger: slog.New(slog.NewTextHandler(os.Stdout, nil)),
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
|
func (c *command) must(err error) {
|
||||||
defer s.must(unlock())
|
if err != nil {
|
||||||
s.must(err)
|
c.logger.Error(
|
||||||
|
fmt.Sprintf("Fatal error running command: %v", err),
|
||||||
|
"error",
|
||||||
|
err,
|
||||||
|
)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func runScript(c *Config) (err error) {
|
||||||
defer func() {
|
defer func() {
|
||||||
if pArg := recover(); pArg != nil {
|
if derr := recover(); derr != nil {
|
||||||
if err, ok := pArg.(error); ok {
|
err = fmt.Errorf("runScript: unexpected panic running script: %v", err)
|
||||||
s.logger.Error(
|
|
||||||
fmt.Sprintf("Executing the script encountered a panic: %v", err),
|
|
||||||
)
|
|
||||||
if hookErr := s.runHooks(err); hookErr != nil {
|
|
||||||
s.logger.Error(
|
|
||||||
fmt.Sprintf("An error occurred calling the registered hooks: %s", hookErr),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
panic(pArg)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.runHooks(nil); err != nil {
|
|
||||||
s.logger.Error(
|
|
||||||
fmt.Sprintf(
|
|
||||||
"Backup procedure ran successfully, but an error ocurred calling the registered hooks: %v",
|
|
||||||
err,
|
|
||||||
),
|
|
||||||
)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
s.logger.Info("Finished running backup tasks.")
|
|
||||||
}()
|
}()
|
||||||
|
|
||||||
s.must(s.withLabeledCommands(lifecyclePhaseArchive, func() error {
|
s, err := newScript(c)
|
||||||
restartContainersAndServices, err := s.stopContainersAndServices()
|
if err != nil {
|
||||||
// The mechanism for restarting containers is not using hooks as it
|
err = fmt.Errorf("runScript: error instantiating script: %w", err)
|
||||||
// should happen as soon as possible (i.e. before uploading backups or
|
return
|
||||||
// similar).
|
}
|
||||||
defer func() {
|
|
||||||
s.must(restartContainersAndServices())
|
|
||||||
}()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return s.createArchive()
|
|
||||||
})())
|
|
||||||
|
|
||||||
s.must(s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)())
|
runErr := func() (err error) {
|
||||||
s.must(s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)())
|
unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
|
||||||
s.must(s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)())
|
if err != nil {
|
||||||
|
err = fmt.Errorf("runScript: error acquiring file lock: %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
derr := unlock()
|
||||||
|
if err == nil && derr != nil {
|
||||||
|
err = fmt.Errorf("runScript: error releasing file lock: %w", derr)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
scriptErr := func() error {
|
||||||
|
if err := s.withLabeledCommands(lifecyclePhaseArchive, func() (err error) {
|
||||||
|
restartContainersAndServices, err := s.stopContainersAndServices()
|
||||||
|
// The mechanism for restarting containers is not using hooks as it
|
||||||
|
// should happen as soon as possible (i.e. before uploading backups or
|
||||||
|
// similar).
|
||||||
|
defer func() {
|
||||||
|
derr := restartContainersAndServices()
|
||||||
|
if err == nil {
|
||||||
|
err = derr
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err = s.createArchive()
|
||||||
|
return
|
||||||
|
})(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}()
|
||||||
|
|
||||||
|
if hookErr := s.runHooks(scriptErr); hookErr != nil {
|
||||||
|
if scriptErr != nil {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"runScript: error %w executing the script followed by %w calling the registered hooks",
|
||||||
|
scriptErr,
|
||||||
|
hookErr,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return fmt.Errorf(
|
||||||
|
"runScript: the script ran successfully, but an error occurred calling the registered hooks: %w",
|
||||||
|
hookErr,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
if scriptErr != nil {
|
||||||
|
return fmt.Errorf("runScript: error running script: %w", scriptErr)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}()
|
||||||
|
|
||||||
|
if runErr != nil {
|
||||||
|
s.logger.Error(
|
||||||
|
fmt.Sprintf("Script run failed: %v", runErr), "error", runErr,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return runErr
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *command) runInForeground() error {
|
||||||
|
cr := cron.New(
|
||||||
|
cron.WithParser(
|
||||||
|
cron.NewParser(
|
||||||
|
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
|
||||||
|
),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
addJob := func(config *Config, name string) error {
|
||||||
|
if _, err := cr.AddFunc(config.BackupCronExpression, func() {
|
||||||
|
c.logger.Info(
|
||||||
|
fmt.Sprintf(
|
||||||
|
"Now running script on schedule %s",
|
||||||
|
config.BackupCronExpression,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
if err := runScript(config); err != nil {
|
||||||
|
c.logger.Error(
|
||||||
|
fmt.Sprintf(
|
||||||
|
"Unexpected error running schedule %s: %v",
|
||||||
|
config.BackupCronExpression,
|
||||||
|
err,
|
||||||
|
),
|
||||||
|
"error",
|
||||||
|
err,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}); err != nil {
|
||||||
|
return fmt.Errorf("addJob: error adding schedule %s: %w", config.BackupCronExpression, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c.logger.Info(fmt.Sprintf("Successfully scheduled backup %s with expression %s", name, config.BackupCronExpression))
|
||||||
|
if ok := checkCronSchedule(config.BackupCronExpression); !ok {
|
||||||
|
c.logger.Warn(
|
||||||
|
fmt.Sprintf("Scheduled cron expression %s will never run, is this intentional?", config.BackupCronExpression),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cs, err := loadEnvFiles("/etc/dockervolumebackup/conf.d")
|
||||||
|
if err != nil {
|
||||||
|
if !os.IsNotExist(err) {
|
||||||
|
return fmt.Errorf("runInForeground: could not load config from environment files: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := loadEnvVars()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("runInForeground: could not load config from environment variables: %w", err)
|
||||||
|
} else {
|
||||||
|
err = addJob(c, "from environment")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("runInForeground: error adding job from env: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
c.logger.Info("/etc/dockervolumebackup/conf.d was found, using configuration files from this directory.")
|
||||||
|
for _, config := range cs {
|
||||||
|
err = addJob(config.config, config.name)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("runInForeground: error adding jobs from conf files: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var quit = make(chan os.Signal, 1)
|
||||||
|
signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT)
|
||||||
|
cr.Start()
|
||||||
|
<-quit
|
||||||
|
ctx := cr.Stop()
|
||||||
|
<-ctx.Done()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *command) runAsCommand() error {
|
||||||
|
config, err := loadEnvVars()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("runAsCommand: error loading env vars: %w", err)
|
||||||
|
}
|
||||||
|
err = runScript(config)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("runAsCommand: error running script: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
foreground := flag.Bool("foreground", false, "run the tool in the foreground")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
c := newCommand()
|
||||||
|
if *foreground {
|
||||||
|
c.must(c.runInForeground())
|
||||||
|
} else {
|
||||||
|
c.must(c.runAsCommand())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -30,7 +30,6 @@ import (
|
|||||||
"github.com/containrrr/shoutrrr/pkg/router"
|
"github.com/containrrr/shoutrrr/pkg/router"
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
"github.com/leekchan/timeutil"
|
"github.com/leekchan/timeutil"
|
||||||
"github.com/offen/envconfig"
|
|
||||||
"github.com/otiai10/copy"
|
"github.com/otiai10/copy"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
@@ -58,10 +57,10 @@ type script struct {
|
|||||||
// remote resources like the Docker engine or remote storage locations. All
|
// remote resources like the Docker engine or remote storage locations. All
|
||||||
// reading from env vars or other configuration sources is expected to happen
|
// reading from env vars or other configuration sources is expected to happen
|
||||||
// in this method.
|
// in this method.
|
||||||
func newScript() (*script, error) {
|
func newScript(c *Config) (*script, error) {
|
||||||
stdOut, logBuffer := buffer(os.Stdout)
|
stdOut, logBuffer := buffer(os.Stdout)
|
||||||
s := &script{
|
s := &script{
|
||||||
c: &Config{},
|
c: c,
|
||||||
logger: slog.New(slog.NewTextHandler(stdOut, nil)),
|
logger: slog.New(slog.NewTextHandler(stdOut, nil)),
|
||||||
stats: &Stats{
|
stats: &Stats{
|
||||||
StartTime: time.Now(),
|
StartTime: time.Now(),
|
||||||
@@ -83,32 +82,6 @@ func newScript() (*script, error) {
|
|||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
|
||||||
envconfig.Lookup = func(key string) (string, bool) {
|
|
||||||
value, okValue := os.LookupEnv(key)
|
|
||||||
location, okFile := os.LookupEnv(key + "_FILE")
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case okValue && !okFile: // only value
|
|
||||||
return value, true
|
|
||||||
case !okValue && okFile: // only file
|
|
||||||
contents, err := os.ReadFile(location)
|
|
||||||
if err != nil {
|
|
||||||
s.must(fmt.Errorf("newScript: failed to read %s! Error: %s", location, err))
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
return string(contents), true
|
|
||||||
case okValue && okFile: // both
|
|
||||||
s.must(fmt.Errorf("newScript: both %s and %s are set!", key, key+"_FILE"))
|
|
||||||
return "", false
|
|
||||||
default: // neither, ignore
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := envconfig.Process("", s.c); err != nil {
|
|
||||||
return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.file = path.Join("/tmp", s.c.BackupFilename)
|
s.file = path.Join("/tmp", s.c.BackupFilename)
|
||||||
|
|
||||||
tmplFileName, tErr := template.New("extension").Parse(s.file)
|
tmplFileName, tErr := template.New("extension").Parse(s.file)
|
||||||
@@ -507,17 +480,6 @@ func (s *script) pruneBackups() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// must exits the script run prematurely in case the given error
|
|
||||||
// is non-nil.
|
|
||||||
func (s *script) must(err error) {
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Error(
|
|
||||||
fmt.Sprintf("Fatal error running backup: %s", err),
|
|
||||||
)
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// skipPrune returns true if the given backend name is contained in the
|
// skipPrune returns true if the given backend name is contained in the
|
||||||
// list of skipped backends.
|
// list of skipped backends.
|
||||||
func skipPrune(name string, skippedBackends []string) bool {
|
func skipPrune(name string, skippedBackends []string) bool {
|
||||||
|
|||||||
@@ -210,9 +210,9 @@ func (s *script) stopContainersAndServices() (func() error, error) {
|
|||||||
warnings, err := scaleService(s.cli, svc.serviceID, 0)
|
warnings, err := scaleService(s.cli, svc.serviceID, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
scaleDownErrors.append(err)
|
scaleDownErrors.append(err)
|
||||||
} else {
|
return
|
||||||
scaledDownServices = append(scaledDownServices, svc)
|
|
||||||
}
|
}
|
||||||
|
scaledDownServices = append(scaledDownServices, svc)
|
||||||
for _, warning := range warnings {
|
for _, warning := range warnings {
|
||||||
s.logger.Warn(
|
s.logger.Warn(
|
||||||
fmt.Sprintf("The Docker API returned a warning when scaling down service %s: %s", svc.serviceID, warning),
|
fmt.Sprintf("The Docker API returned a warning when scaling down service %s: %s", svc.serviceID, warning),
|
||||||
|
|||||||
@@ -13,5 +13,33 @@ If you are interfacing with Docker via TCP, set `DOCKER_HOST` to the correct URL
|
|||||||
DOCKER_HOST=tcp://docker_socket_proxy:2375
|
DOCKER_HOST=tcp://docker_socket_proxy:2375
|
||||||
```
|
```
|
||||||
|
|
||||||
In case you are using a socket proxy, it must support `GET` and `POST` requests to the `/containers` endpoint. If you are using Docker Swarm, it must also support the `/services` endpoint. If you are using pre/post backup commands, it must also support the `/exec` endpoint.
|
If you do this as you seek to restrict access to the Docker socket, this tool is potentially calling the following Docker APIs:
|
||||||
|
|
||||||
|
| API | When |
|
||||||
|
|-|-|
|
||||||
|
| `Info` | always |
|
||||||
|
| `ContainerExecCreate` | running commands from `exec-labels` |
|
||||||
|
| `ContainerExecAttach` | running commands from `exec-labels` |
|
||||||
|
| `ContainerExecInspect` | running commands from `exec-labels` |
|
||||||
|
| `ContainerList` | always |
|
||||||
|
| `ServiceList` | Docker engine is running in Swarm mode |
|
||||||
|
| `ServiceInspect` | Docker engine is running in Swarm mode |
|
||||||
|
| `ServiceUpdate` | Docker engine is running in Swarm mode and `stop-during-backup` is used |
|
||||||
|
| `ContainerStop` | `stop-during-backup` labels are applied to containers |
|
||||||
|
| `ContainerStart` | `stop-during-backup` labels are applied to containers |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
In case you are using [`docker-socket-proxy`][proxy], this means following permissions are required:
|
||||||
|
|
||||||
|
| Permission | When |
|
||||||
|
|-|-|
|
||||||
|
| INFO | always required |
|
||||||
|
| CONTAINERS | always required |
|
||||||
|
| POST | required when using `stop-during-backup` or `exec` labels |
|
||||||
|
| EXEC | required when using `exec`-labeled commands |
|
||||||
|
| SERVICES | required when Docker Engine is running in Swarm mode |
|
||||||
|
| NODES | required when labeling services `stop-during-backup` |
|
||||||
|
| TASKS | required when labeling services `stop-during-backup` |
|
||||||
|
|
||||||
|
[proxy]: https://github.com/Tecnativa/docker-socket-proxy
|
||||||
|
|||||||
@@ -23,9 +23,22 @@ You can populate below template according to your requirements and use it as you
|
|||||||
```
|
```
|
||||||
########### BACKUP SCHEDULE
|
########### BACKUP SCHEDULE
|
||||||
|
|
||||||
# Backups run on the given cron schedule in `busybox` flavor. If no
|
|
||||||
# value is set, `@daily` will be used. If you do not want the cron
|
# A cron expression represents a set of times, using 5 or 6 space-separated fields.
|
||||||
# to ever run, use `0 0 5 31 2 ?`.
|
#
|
||||||
|
# Field name | Mandatory? | Allowed values | Allowed special characters
|
||||||
|
# ---------- | ---------- | -------------- | --------------------------
|
||||||
|
# Seconds | No | 0-59 | * / , -
|
||||||
|
# Minutes | Yes | 0-59 | * / , -
|
||||||
|
# Hours | Yes | 0-23 | * / , -
|
||||||
|
# Day of month | Yes | 1-31 | * / , - ?
|
||||||
|
# Month | Yes | 1-12 or JAN-DEC | * / , -
|
||||||
|
# Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
|
||||||
|
#
|
||||||
|
# Month and Day-of-week field values are case insensitive.
|
||||||
|
# "SUN", "Sun", and "sun" are equally accepted.
|
||||||
|
# If no value is set, `@daily` will be used.
|
||||||
|
# If you do not want the cron to ever run, use `0 0 5 31 2 ?`.
|
||||||
|
|
||||||
# BACKUP_CRON_EXPRESSION="0 2 * * *"
|
# BACKUP_CRON_EXPRESSION="0 2 * * *"
|
||||||
|
|
||||||
|
|||||||
@@ -1,26 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
# Copyright 2021 - Offen Authors <hioffen@posteo.de>
|
|
||||||
# SPDX-License-Identifier: MPL-2.0
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
if [ ! -d "/etc/dockervolumebackup/conf.d" ]; then
|
|
||||||
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
|
|
||||||
|
|
||||||
echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
|
|
||||||
echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
|
|
||||||
else
|
|
||||||
echo "/etc/dockervolumebackup/conf.d was found, using configuration files from this directory."
|
|
||||||
|
|
||||||
crontab -r && crontab /dev/null
|
|
||||||
for file in /etc/dockervolumebackup/conf.d/*; do
|
|
||||||
source $file
|
|
||||||
BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
|
|
||||||
echo "Appending cron.d entry with expression $BACKUP_CRON_EXPRESSION and configuration file $file"
|
|
||||||
(crontab -l; echo "$BACKUP_CRON_EXPRESSION /bin/sh -c 'set -a; source $file; set +a && backup' 2>&1") | crontab -
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "Starting cron in foreground."
|
|
||||||
crond -f -d 8
|
|
||||||
4
go.mod
4
go.mod
@@ -10,12 +10,14 @@ require (
|
|||||||
github.com/docker/cli v24.0.1+incompatible
|
github.com/docker/cli v24.0.1+incompatible
|
||||||
github.com/docker/docker v24.0.7+incompatible
|
github.com/docker/docker v24.0.7+incompatible
|
||||||
github.com/gofrs/flock v0.8.1
|
github.com/gofrs/flock v0.8.1
|
||||||
github.com/klauspost/compress v1.17.5
|
github.com/joho/godotenv v1.5.1
|
||||||
|
github.com/klauspost/compress v1.17.6
|
||||||
github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
|
github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
|
||||||
github.com/minio/minio-go/v7 v7.0.66
|
github.com/minio/minio-go/v7 v7.0.66
|
||||||
github.com/offen/envconfig v1.5.0
|
github.com/offen/envconfig v1.5.0
|
||||||
github.com/otiai10/copy v1.14.0
|
github.com/otiai10/copy v1.14.0
|
||||||
github.com/pkg/sftp v1.13.6
|
github.com/pkg/sftp v1.13.6
|
||||||
|
github.com/robfig/cron/v3 v3.0.0
|
||||||
github.com/studio-b12/gowebdav v0.9.0
|
github.com/studio-b12/gowebdav v0.9.0
|
||||||
golang.org/x/crypto v0.18.0
|
golang.org/x/crypto v0.18.0
|
||||||
golang.org/x/oauth2 v0.16.0
|
golang.org/x/oauth2 v0.16.0
|
||||||
|
|||||||
8
go.sum
8
go.sum
@@ -443,6 +443,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:
|
|||||||
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
github.com/jarcoal/httpmock v1.2.0 h1:gSvTxxFR/MEMfsGrvRbdfpRUMBStovlSRLw0Ep1bwwc=
|
github.com/jarcoal/httpmock v1.2.0 h1:gSvTxxFR/MEMfsGrvRbdfpRUMBStovlSRLw0Ep1bwwc=
|
||||||
github.com/jarcoal/httpmock v1.2.0/go.mod h1:oCoTsnAz4+UoOUIf5lJOWV2QQIW5UoeUI6aM2YnWAZk=
|
github.com/jarcoal/httpmock v1.2.0/go.mod h1:oCoTsnAz4+UoOUIf5lJOWV2QQIW5UoeUI6aM2YnWAZk=
|
||||||
|
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||||
|
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
@@ -456,8 +458,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
|
|||||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/klauspost/compress v1.17.5 h1:d4vBd+7CHydUqpFBgUEKkSdtSugf9YFmSkvUYPquI5E=
|
github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
|
||||||
github.com/klauspost/compress v1.17.5/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||||
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
|
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
|
||||||
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||||
@@ -593,6 +595,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
|
|||||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
|
github.com/robfig/cron/v3 v3.0.0 h1:kQ6Cb7aHOHTSzNVNEhmp8EcWKLb4CbiMW9h9VyIhO4E=
|
||||||
|
github.com/robfig/cron/v3 v3.0.0/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||||
|
|||||||
@@ -13,6 +13,8 @@ docker compose up -d --quiet-pull
|
|||||||
# sleep until a backup is guaranteed to have happened on the 1 minute schedule
|
# sleep until a backup is guaranteed to have happened on the 1 minute schedule
|
||||||
sleep 100
|
sleep 100
|
||||||
|
|
||||||
|
docker compose logs backup
|
||||||
|
|
||||||
if [ ! -f "$LOCAL_DIR/conf.tar.gz" ]; then
|
if [ ! -f "$LOCAL_DIR/conf.tar.gz" ]; then
|
||||||
fail "Config from file was not used."
|
fail "Config from file was not used."
|
||||||
fi
|
fi
|
||||||
|
|||||||
23
test/lock/docker-compose.yml
Normal file
23
test/lock/docker-compose.yml
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
version: '3'
|
||||||
|
|
||||||
|
services:
|
||||||
|
backup:
|
||||||
|
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
|
||||||
|
restart: always
|
||||||
|
environment:
|
||||||
|
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
|
||||||
|
BACKUP_RETENTION_DAYS: '7'
|
||||||
|
volumes:
|
||||||
|
- app_data:/backup/app_data:ro
|
||||||
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
- ${LOCAL_DIR:-./local}:/archive
|
||||||
|
|
||||||
|
offen:
|
||||||
|
image: offen/offen:latest
|
||||||
|
labels:
|
||||||
|
- docker-volume-backup.stop-during-backup=true
|
||||||
|
volumes:
|
||||||
|
- app_data:/var/opt/offen
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
app_data:
|
||||||
34
test/lock/run.sh
Executable file
34
test/lock/run.sh
Executable file
@@ -0,0 +1,34 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
. ../util.sh
|
||||||
|
current_test=$(basename $(pwd))
|
||||||
|
|
||||||
|
export LOCAL_DIR=$(mktemp -d)
|
||||||
|
|
||||||
|
docker compose up -d --quiet-pull
|
||||||
|
sleep 5
|
||||||
|
|
||||||
|
ec=0
|
||||||
|
|
||||||
|
docker compose exec -e BACKUP_RETENTION_DAYS=7 -e BACKUP_FILENAME=test.tar.gz backup backup & \
|
||||||
|
{ set +e; sleep 0.1; docker compose exec -e BACKUP_FILENAME=test2.tar.gz -e LOCK_TIMEOUT=1s backup backup; ec=$?;}
|
||||||
|
|
||||||
|
if [ "$ec" = "0" ]; then
|
||||||
|
fail "Subsequent invocation exited 0"
|
||||||
|
fi
|
||||||
|
pass "Subsequent invocation did not exit 0"
|
||||||
|
|
||||||
|
sleep 5
|
||||||
|
|
||||||
|
if [ ! -f "${LOCAL_DIR}/test.tar.gz" ]; then
|
||||||
|
fail "Could not find expected tar file"
|
||||||
|
fi
|
||||||
|
pass "Found expected tar file"
|
||||||
|
|
||||||
|
if [ -f "${LOCAL_DIR}/test2.tar.gz" ]; then
|
||||||
|
fail "Subsequent invocation was expected to fail but created archive"
|
||||||
|
fi
|
||||||
|
pass "Subsequent invocation did not create archive"
|
||||||
40
test/proxy/docker-compose.swarm.yml
Normal file
40
test/proxy/docker-compose.swarm.yml
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
|
||||||
|
# SPDX-License-Identifier: Unlicense
|
||||||
|
|
||||||
|
version: '3.8'
|
||||||
|
|
||||||
|
services:
|
||||||
|
backup:
|
||||||
|
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
|
||||||
|
environment:
|
||||||
|
BACKUP_FILENAME: test.tar.gz
|
||||||
|
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
|
||||||
|
DOCKER_HOST: tcp://docker_socket_proxy:2375
|
||||||
|
volumes:
|
||||||
|
- pg_data:/backup/pg_data:ro
|
||||||
|
- ${LOCAL_DIR:-local}:/archive
|
||||||
|
|
||||||
|
docker_socket_proxy:
|
||||||
|
image: tecnativa/docker-socket-proxy:0.1
|
||||||
|
environment:
|
||||||
|
INFO: ${ALLOW_INFO:-1}
|
||||||
|
CONTAINERS: ${ALLOW_CONTAINERS:-1}
|
||||||
|
SERVICES: ${ALLOW_SERVICES:-1}
|
||||||
|
POST: ${ALLOW_POST:-1}
|
||||||
|
TASKS: ${ALLOW_TASKS:-1}
|
||||||
|
NODES: ${ALLOW_NODES:-1}
|
||||||
|
volumes:
|
||||||
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
|
||||||
|
pg:
|
||||||
|
image: postgres:14-alpine
|
||||||
|
environment:
|
||||||
|
POSTGRES_PASSWORD: example
|
||||||
|
volumes:
|
||||||
|
- pg_data:/var/lib/postgresql/data
|
||||||
|
deploy:
|
||||||
|
labels:
|
||||||
|
- docker-volume-backup.stop-during-backup=true
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
pg_data:
|
||||||
36
test/proxy/docker-compose.yml
Normal file
36
test/proxy/docker-compose.yml
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
|
||||||
|
# SPDX-License-Identifier: Unlicense
|
||||||
|
|
||||||
|
version: '3.8'
|
||||||
|
|
||||||
|
services:
|
||||||
|
backup:
|
||||||
|
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
|
||||||
|
environment:
|
||||||
|
BACKUP_FILENAME: test.tar.gz
|
||||||
|
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
|
||||||
|
DOCKER_HOST: tcp://docker_socket_proxy:2375
|
||||||
|
volumes:
|
||||||
|
- pg_data:/backup/pg_data:ro
|
||||||
|
- ${LOCAL_DIR:-local}:/archive
|
||||||
|
|
||||||
|
docker_socket_proxy:
|
||||||
|
image: tecnativa/docker-socket-proxy:0.1
|
||||||
|
environment:
|
||||||
|
INFO: ${ALLOW_INFO:-1}
|
||||||
|
CONTAINERS: ${ALLOW_CONTAINERS:-1}
|
||||||
|
POST: ${ALLOW_POST:-1}
|
||||||
|
volumes:
|
||||||
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
|
||||||
|
pg:
|
||||||
|
image: postgres:14-alpine
|
||||||
|
environment:
|
||||||
|
POSTGRES_PASSWORD: example
|
||||||
|
volumes:
|
||||||
|
- pg_data:/var/lib/postgresql/data
|
||||||
|
labels:
|
||||||
|
- docker-volume-backup.stop-during-backup=true
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
pg_data:
|
||||||
76
test/proxy/run.sh
Executable file
76
test/proxy/run.sh
Executable file
@@ -0,0 +1,76 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
cd $(dirname $0)
|
||||||
|
. ../util.sh
|
||||||
|
current_test=$(basename $(pwd))
|
||||||
|
|
||||||
|
export LOCAL_DIR=$(mktemp -d)
|
||||||
|
|
||||||
|
docker compose up -d --quiet-pull
|
||||||
|
sleep 5
|
||||||
|
|
||||||
|
# The default configuration in docker-compose.yml should
|
||||||
|
# successfully create a backup.
|
||||||
|
docker compose exec backup backup
|
||||||
|
|
||||||
|
sleep 5
|
||||||
|
|
||||||
|
expect_running_containers "3"
|
||||||
|
|
||||||
|
if [ ! -f "$LOCAL_DIR/test.tar.gz" ]; then
|
||||||
|
fail "Archive was not created"
|
||||||
|
fi
|
||||||
|
pass "Found relevant archive file."
|
||||||
|
|
||||||
|
# Disabling POST should make the backup run fail
|
||||||
|
ALLOW_POST="0" docker compose up -d
|
||||||
|
sleep 5
|
||||||
|
|
||||||
|
set +e
|
||||||
|
docker compose exec backup backup
|
||||||
|
if [ $? = "0" ]; then
|
||||||
|
fail "Expected invocation to exit non-zero."
|
||||||
|
fi
|
||||||
|
set -e
|
||||||
|
pass "Invocation exited non-zero."
|
||||||
|
|
||||||
|
docker compose down --volumes
|
||||||
|
|
||||||
|
# Next, the test is run against a Swarm setup
|
||||||
|
|
||||||
|
docker swarm init
|
||||||
|
|
||||||
|
export LOCAL_DIR=$(mktemp -d)
|
||||||
|
|
||||||
|
docker stack deploy --compose-file=docker-compose.swarm.yml test_stack
|
||||||
|
|
||||||
|
sleep 20
|
||||||
|
|
||||||
|
# The default configuration in docker-compose.swarm.yml should
|
||||||
|
# successfully create a backup in Swarm mode.
|
||||||
|
docker exec $(docker ps -q -f name=backup) backup
|
||||||
|
|
||||||
|
if [ ! -f "$LOCAL_DIR/test.tar.gz" ]; then
|
||||||
|
fail "Archive was not created"
|
||||||
|
fi
|
||||||
|
|
||||||
|
pass "Found relevant archive file."
|
||||||
|
|
||||||
|
sleep 5
|
||||||
|
expect_running_containers "3"
|
||||||
|
|
||||||
|
# Disabling POST should make the backup run fail
|
||||||
|
ALLOW_POST="0" docker stack deploy --compose-file=docker-compose.swarm.yml test_stack
|
||||||
|
|
||||||
|
sleep 20
|
||||||
|
|
||||||
|
set +e
|
||||||
|
docker exec $(docker ps -q -f name=backup) backup
|
||||||
|
if [ $? = "0" ]; then
|
||||||
|
fail "Expected invocation to exit non-zero."
|
||||||
|
fi
|
||||||
|
set -e
|
||||||
|
|
||||||
|
pass "Invocation exited non-zero."
|
||||||
Reference in New Issue
Block a user