Mirror of https://github.com/offen/docker-volume-backup.git (synced 2025-12-05 17:18:02 +01:00)

Compare commits (39 commits)
Commits in this range:

9a1e885138, 241b5d2f25, aab47509d9, 9b52c1f63e, 164d6df3b4, 4c74313222,
de03d4f704, 65626dd3d4, 69eceb3982, 1d45062100, 64d934102d, 0f224e4fb8,
6029225f74, 63b545787e, c3daeacecb, 2065fb2815, 97e5aa42cc, ed5abd5ba8,
810c8871ec, 67e3b79709, b51b25997b, bf44369915, 7e1ee21ef9, 0fbc0637ed,
b38bb749c0, 64daf7b132, 06792eb1f0, da6683a98f, be1901d181, 4d7d2e50cf,
caa27d477f, 58573e6733, 84990ed6bd, 94f0975a30, e5c3b47ec9, 619624f0d0,
52cd70c7a9, 55bcd90c2d, 382a613cbc
.github/workflows/deploy-docs.yml (vendored, 2 changes)

@@ -19,7 +19,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Setup Ruby
         uses: ruby/setup-ruby@v1
         with:
.github/workflows/golangci-lint.yml (vendored, 4 changes)

@@ -15,8 +15,8 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
        with:
          go-version: '1.21'
          cache: false
.github/workflows/release.yml (vendored, 2 changes)

@@ -13,7 +13,7 @@ jobs:
       contents: read
     steps:
       - name: Check out the repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
.github/workflows/test.yml (vendored, 2 changes)

@@ -10,7 +10,7 @@ jobs:
   test:
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
Dockerfile

@@ -9,13 +9,12 @@ RUN go mod download
 WORKDIR /app/cmd/backup
 RUN go build -o backup .

-FROM alpine:3.18
+FROM alpine:3.19

 WORKDIR /root

 RUN apk add --no-cache ca-certificates

 COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
-COPY --chmod=755 ./entrypoint.sh /root/

-ENTRYPOINT ["/root/entrypoint.sh"]
+ENTRYPOINT ["/usr/bin/backup", "-foreground"]
cmd/backup/config.go

@@ -34,10 +34,13 @@ type Config struct {
 	BackupFilenameExpand        bool          `split_words:"true"`
 	BackupLatestSymlink         string        `split_words:"true"`
 	BackupArchive               string        `split_words:"true" default:"/archive"`
 	BackupCronExpression        string        `split_words:"true" default:"@daily"`
 	BackupRetentionDays         int32         `split_words:"true" default:"-1"`
 	BackupPruningLeeway         time.Duration `split_words:"true" default:"1m"`
 	BackupPruningPrefix         string        `split_words:"true"`
-	BackupStopContainerLabel    string        `split_words:"true" default:"true"`
+	BackupStopContainerLabel    string        `split_words:"true"`
+	BackupStopDuringBackupLabel string        `split_words:"true" default:"true"`
+	BackupStopServiceTimeout    time.Duration `split_words:"true" default:"5m"`
 	BackupFromSnapshot          bool          `split_words:"true"`
 	BackupExcludeRegexp         RegexpDecoder `split_words:"true"`
 	BackupSkipBackendsFromPrune []string      `split_words:"true"`
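For readers unfamiliar with the struct tags above, here is a minimal sketch (not part of the diff) of how `split_words:"true"` maps fields onto the environment variable names used throughout the docs. It assumes the `offen/envconfig` fork keeps the upstream `kelseyhightower/envconfig` semantics; `demoConfig` is a made-up type.

```go
package main

import (
	"fmt"
	"os"

	"github.com/offen/envconfig"
)

// demoConfig mirrors two fields from the diff above. With split_words,
// BackupStopDuringBackupLabel is looked up as BACKUP_STOP_DURING_BACKUP_LABEL.
type demoConfig struct {
	BackupStopDuringBackupLabel string `split_words:"true" default:"true"`
	BackupStopContainerLabel    string `split_words:"true"`
}

func main() {
	os.Setenv("BACKUP_STOP_DURING_BACKUP_LABEL", "service1")
	var c demoConfig
	if err := envconfig.Process("", &c); err != nil {
		panic(err)
	}
	fmt.Println(c.BackupStopDuringBackupLabel) // "service1"
	fmt.Println(c.BackupStopContainerLabel)    // "" (deprecated key, no default anymore)
}
```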
cmd/backup/config_provider.go (new file, 91 lines)

@@ -0,0 +1,91 @@
+// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/joho/godotenv"
+	"github.com/offen/envconfig"
+)
+
+// envProxy is a function that mimics os.LookupEnv but can read values from any other source
+type envProxy func(string) (string, bool)
+
+func loadConfig(lookup envProxy) (*Config, error) {
+	envconfig.Lookup = func(key string) (string, bool) {
+		value, okValue := lookup(key)
+		location, okFile := lookup(key + "_FILE")
+
+		switch {
+		case okValue && !okFile: // only value
+			return value, true
+		case !okValue && okFile: // only file
+			contents, err := os.ReadFile(location)
+			if err != nil {
+				return "", false
+			}
+			return string(contents), true
+		case okValue && okFile: // both
+			return "", false
+		default: // neither, ignore
+			return "", false
+		}
+	}
+
+	var c = &Config{}
+	if err := envconfig.Process("", c); err != nil {
+		return nil, fmt.Errorf("loadConfig: failed to process configuration values: %w", err)
+	}
+
+	return c, nil
+}
+
+func loadEnvVars() (*Config, error) {
+	return loadConfig(os.LookupEnv)
+}
+
+type configFile struct {
+	name              string
+	config            *Config
+	additionalEnvVars map[string]string
+}
+
+func loadEnvFiles(directory string) ([]configFile, error) {
+	items, err := os.ReadDir(directory)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, err
+		}
+		return nil, fmt.Errorf("loadEnvFiles: failed to read files from env directory: %w", err)
+	}
+
+	cs := []configFile{}
+	for _, item := range items {
+		if item.IsDir() {
+			continue
+		}
+		p := filepath.Join(directory, item.Name())
+		envFile, err := godotenv.Read(p)
+		if err != nil {
+			return nil, fmt.Errorf("loadEnvFiles: error reading config file %s: %w", p, err)
+		}
+		lookup := func(key string) (string, bool) {
+			val, ok := envFile[key]
+			if ok {
+				return val, ok
+			}
+			return os.LookupEnv(key)
+		}
+		c, err := loadConfig(lookup)
+		if err != nil {
+			return nil, fmt.Errorf("loadEnvFiles: error loading config from file %s: %w", p, err)
+		}
+		cs = append(cs, configFile{config: c, name: item.Name(), additionalEnvVars: envFile})
+	}
+
+	return cs, nil
+}
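The `_FILE` indirection in `loadConfig` means any configuration key can instead point at a file holding the value (useful for mounted Docker secrets). A standalone sketch (not part of the diff) of that resolution order, using a made-up `fakeEnv` map in place of a real environment or env file:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Write a throwaway secret file to stand in for a mounted Docker secret.
	secret, _ := os.CreateTemp("", "secret")
	secret.WriteString("wJalrXUtnFEMI/K7MDENG")
	secret.Close()
	defer os.Remove(secret.Name())

	fakeEnv := map[string]string{
		"AWS_ACCESS_KEY_ID":          "AKIAIOSFODNN7EXAMPLE",
		"AWS_SECRET_ACCESS_KEY_FILE": secret.Name(),
	}
	lookup := func(key string) (string, bool) { // shape of envProxy from the diff
		v, ok := fakeEnv[key]
		return v, ok
	}

	// Mirrors the switch in loadConfig: a plain value wins, *_FILE is read from disk.
	resolve := func(key string) (string, bool) {
		if v, ok := lookup(key); ok {
			return v, true
		}
		if loc, ok := lookup(key + "_FILE"); ok {
			b, err := os.ReadFile(loc)
			if err != nil {
				return "", false
			}
			return string(b), true
		}
		return "", false
	}

	fmt.Println(resolve("AWS_ACCESS_KEY_ID"))     // value taken directly
	fmt.Println(resolve("AWS_SECRET_ACCESS_KEY")) // value read from the *_FILE path
}
```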
cmd/backup/cron.go (new file, 29 lines)

@@ -0,0 +1,29 @@
+// Copyright 2024 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"time"
+
+	"github.com/robfig/cron/v3"
+)
+
+// checkCronSchedule detects whether the given cron expression will actually
+// ever be executed or not.
+func checkCronSchedule(expression string) (ok bool) {
+	defer func() {
+		if err := recover(); err != nil {
+			ok = false
+		}
+	}()
+	sched, err := cron.ParseStandard(expression)
+	if err != nil {
+		ok = false
+		return
+	}
+	now := time.Now()
+	sched.Next(now) // panics when the cron would never run
+	ok = true
+	return
+}
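A quick usage sketch (not part of the diff). The never-runs detection leans on the panic behavior described in the comment above, so results for exotic expressions depend on the `robfig/cron` version in use; only the two unambiguous cases are shown here.

```go
package main

import "fmt"

// Assumes this compiles alongside checkCronSchedule above (same package main).
func main() {
	fmt.Println(checkCronSchedule("@daily"))          // true: parses and yields a next run time
	fmt.Println(checkCronSchedule("every fortnight")) // false: cron.ParseStandard rejects it
}
```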
@@ -188,13 +188,18 @@ func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func(
 	if s.cli == nil {
 		return cb
 	}
-	return func() error {
-		if err := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
-			return fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
+	return func() (err error) {
+		if err = s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
+			err = fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
+			return
 		}
 		defer func() {
-			s.must(s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)))
+			derr := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step))
+			if err == nil && derr != nil {
+				err = derr
+			}
 		}()
-		return cb()
+		err = cb()
+		return
 	}
 }
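The rewrite above swaps `s.must` (panic on error) for a named return plus a deferred cleanup whose error only wins when the main callback succeeded. The pattern in isolation, as a sketch; `runWithCleanup` is a made-up name:

```go
package main

import (
	"errors"
	"fmt"
)

// runWithCleanup runs work, always runs cleanup afterwards, and surfaces the
// cleanup error only if work itself succeeded, mirroring withLabeledCommands.
func runWithCleanup(work, cleanup func() error) (err error) {
	defer func() {
		derr := cleanup()
		if err == nil && derr != nil {
			err = derr
		}
	}()
	return work()
}

func main() {
	err := runWithCleanup(
		func() error { return nil },
		func() error { return errors.New("post command failed") },
	)
	fmt.Println(err) // "post command failed": surfaced because work succeeded
}
```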
cmd/backup/main.go

@@ -4,60 +4,240 @@
 package main

 import (
 	"flag"
 	"fmt"
 	"log/slog"
 	"os"
 	"os/signal"
 	"runtime"
 	"syscall"

 	"github.com/robfig/cron/v3"
 )

-func main() {
-	s, err := newScript()
-	if err != nil {
-		panic(err)
-	}
-
-	unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
-	defer s.must(unlock())
-	s.must(err)
-
-	defer func() {
-		if pArg := recover(); pArg != nil {
-			if err, ok := pArg.(error); ok {
-				if hookErr := s.runHooks(err); hookErr != nil {
-					s.logger.Error(
-						fmt.Sprintf("An error occurred calling the registered hooks: %s", hookErr),
-					)
-				}
-				os.Exit(1)
-			}
-			panic(pArg)
-		}
-
-		if err := s.runHooks(nil); err != nil {
-			s.logger.Error(
-				fmt.Sprintf(
-					"Backup procedure ran successfully, but an error ocurred calling the registered hooks: %v",
-					err,
-				),
-			)
-			os.Exit(1)
-		}
-		s.logger.Info("Finished running backup tasks.")
-	}()
-
-	s.must(s.withLabeledCommands(lifecyclePhaseArchive, func() error {
-		restartContainers, err := s.stopContainers()
-		// The mechanism for restarting containers is not using hooks as it
-		// should happen as soon as possible (i.e. before uploading backups or
-		// similar).
-		defer func() {
-			s.must(restartContainers())
-		}()
-		if err != nil {
-			return err
-		}
-		return s.createArchive()
-	})())
-
-	s.must(s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)())
-	s.must(s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)())
-	s.must(s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)())
-}
+type command struct {
+	logger *slog.Logger
+}
+
+func newCommand() *command {
+	return &command{
+		logger: slog.New(slog.NewTextHandler(os.Stdout, nil)),
+	}
+}
+
+func (c *command) must(err error) {
+	if err != nil {
+		c.logger.Error(
+			fmt.Sprintf("Fatal error running command: %v", err),
+			"error",
+			err,
+		)
+		os.Exit(1)
+	}
+}
+
+func runScript(c *Config, envVars map[string]string) (err error) {
+	defer func() {
+		if derr := recover(); derr != nil {
+			err = fmt.Errorf("runScript: unexpected panic running script: %v", derr)
+		}
+	}()
+
+	s, unlock, err := newScript(c, envVars)
+	if err != nil {
+		err = fmt.Errorf("runScript: error instantiating script: %w", err)
+		return
+	}
+	defer func() {
+		derr := unlock()
+		if derr != nil {
+			err = fmt.Errorf("runScript: error releasing file lock: %w", derr)
+		}
+	}()
+
+	runErr := func() (err error) {
+		scriptErr := func() error {
+			if err := s.withLabeledCommands(lifecyclePhaseArchive, func() (err error) {
+				restartContainersAndServices, err := s.stopContainersAndServices()
+				// The mechanism for restarting containers is not using hooks as it
+				// should happen as soon as possible (i.e. before uploading backups or
+				// similar).
+				defer func() {
+					derr := restartContainersAndServices()
+					if err == nil {
+						err = derr
+					}
+				}()
+				if err != nil {
+					return
+				}
+				err = s.createArchive()
+				return
+			})(); err != nil {
+				return err
+			}
+
+			if err := s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)(); err != nil {
+				return err
+			}
+			if err := s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)(); err != nil {
+				return err
+			}
+			if err := s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)(); err != nil {
+				return err
+			}
+			return nil
+		}()
+
+		if hookErr := s.runHooks(scriptErr); hookErr != nil {
+			if scriptErr != nil {
+				return fmt.Errorf(
+					"runScript: error %w executing the script followed by %w calling the registered hooks",
+					scriptErr,
+					hookErr,
+				)
+			}
+			return fmt.Errorf(
+				"runScript: the script ran successfully, but an error occurred calling the registered hooks: %w",
+				hookErr,
+			)
+		}
+		if scriptErr != nil {
+			return fmt.Errorf("runScript: error running script: %w", scriptErr)
+		}
+		return nil
+	}()
+
+	if runErr != nil {
+		s.logger.Error(
+			fmt.Sprintf("Script run failed: %v", runErr), "error", runErr,
+		)
+	}
+	return runErr
+}
+
+func (c *command) runInForeground(profileCronExpression string) error {
+	cr := cron.New(
+		cron.WithParser(
+			cron.NewParser(
+				cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
+			),
+		),
+	)
+
+	addJob := func(config *Config, name string, envVars map[string]string) error {
+		if _, err := cr.AddFunc(config.BackupCronExpression, func() {
+			c.logger.Info(
+				fmt.Sprintf(
+					"Now running script on schedule %s",
+					config.BackupCronExpression,
+				),
+			)
+
+			if err := runScript(config, envVars); err != nil {
+				c.logger.Error(
+					fmt.Sprintf(
+						"Unexpected error running schedule %s: %v",
+						config.BackupCronExpression,
+						err,
+					),
+					"error",
+					err,
+				)
+			}
+		}); err != nil {
+			return fmt.Errorf("addJob: error adding schedule %s: %w", config.BackupCronExpression, err)
+		}
+
+		c.logger.Info(fmt.Sprintf("Successfully scheduled backup %s with expression %s", name, config.BackupCronExpression))
+		if ok := checkCronSchedule(config.BackupCronExpression); !ok {
+			c.logger.Warn(
+				fmt.Sprintf("Scheduled cron expression %s will never run, is this intentional?", config.BackupCronExpression),
+			)
+		}
+
+		return nil
+	}
+
+	cs, err := loadEnvFiles("/etc/dockervolumebackup/conf.d")
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return fmt.Errorf("runInForeground: could not load config from environment files: %w", err)
+		}
+
+		c, err := loadEnvVars()
+		if err != nil {
+			return fmt.Errorf("runInForeground: could not load config from environment variables: %w", err)
+		} else {
+			err = addJob(c, "from environment", nil)
+			if err != nil {
+				return fmt.Errorf("runInForeground: error adding job from env: %w", err)
+			}
+		}
+	} else {
+		c.logger.Info("/etc/dockervolumebackup/conf.d was found, using configuration files from this directory.")
+		for _, config := range cs {
+			err = addJob(config.config, config.name, config.additionalEnvVars)
+			if err != nil {
+				return fmt.Errorf("runInForeground: error adding jobs from conf files: %w", err)
+			}
+		}
+	}
+
+	if profileCronExpression != "" {
+		if _, err := cr.AddFunc(profileCronExpression, func() {
+			memStats := runtime.MemStats{}
+			runtime.ReadMemStats(&memStats)
+			c.logger.Info(
+				"Collecting runtime information",
+				"num_goroutines",
+				runtime.NumGoroutine(),
+				"memory_heap_alloc",
+				formatBytes(memStats.HeapAlloc, false),
+				"memory_heap_inuse",
+				formatBytes(memStats.HeapInuse, false),
+				"memory_heap_sys",
+				formatBytes(memStats.HeapSys, false),
+				"memory_heap_objects",
+				memStats.HeapObjects,
+			)
+		}); err != nil {
+			return fmt.Errorf("runInForeground: error adding profiling job: %w", err)
+		}
+	}
+
+	var quit = make(chan os.Signal, 1)
+	signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT)
+	cr.Start()
+	<-quit
+	ctx := cr.Stop()
+	<-ctx.Done()
+
+	return nil
+}
+
+func (c *command) runAsCommand() error {
+	config, err := loadEnvVars()
+	if err != nil {
+		return fmt.Errorf("runAsCommand: error loading env vars: %w", err)
+	}
+	err = runScript(config, nil)
+	if err != nil {
+		return fmt.Errorf("runAsCommand: error running script: %w", err)
+	}
+
+	return nil
+}
+
+func main() {
+	foreground := flag.Bool("foreground", false, "run the tool in the foreground")
+	profile := flag.String("profile", "", "collect runtime metrics and log them periodically on the given cron expression")
+	flag.Parse()
+
+	c := newCommand()
+	if *foreground {
+		c.must(c.runInForeground(*profile))
+	} else {
+		c.must(c.runAsCommand())
+	}
+}
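The shutdown sequence at the end of `runInForeground` (start the scheduler, block on SIGTERM/SIGINT, stop scheduling, wait for in-flight jobs) is a small reusable pattern. A minimal sketch against the same `robfig/cron/v3` API:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"github.com/robfig/cron/v3"
)

func main() {
	cr := cron.New()
	if _, err := cr.AddFunc("@every 1m", func() { fmt.Println("tick") }); err != nil {
		panic(err)
	}

	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT)

	cr.Start()
	<-quit           // block until a termination signal arrives
	ctx := cr.Stop() // stop scheduling new runs
	<-ctx.Done()     // wait for any job that is still running
}
```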
@@ -6,6 +6,7 @@ package main
 import (
 	"bytes"
 	_ "embed"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"os"

@@ -84,7 +85,9 @@ var templateHelpers = template.FuncMap{
 	"formatBytesBin": func(bytes uint64) string {
 		return formatBytes(bytes, false)
 	},
-	"env": os.Getenv,
+	"env":          os.Getenv,
+	"toJson":       toJson,
+	"toPrettyJson": toPrettyJson,
 }

 // formatBytes converts an amount of bytes in a human-readable representation

@@ -106,3 +109,21 @@ func formatBytes(b uint64, decimal bool) string {
 	}
 	return fmt.Sprintf(format, float64(b)/float64(div), "kMGTPE"[exp])
 }
+
+func toJson(v interface{}) string {
+	var bytes []byte
+	var err error
+	if bytes, err = json.Marshal(v); err != nil {
+		return fmt.Sprintf("failed to marshal JSON in notification template: %v", err)
+	}
+	return string(bytes)
+}
+
+func toPrettyJson(v interface{}) string {
+	var bytes []byte
+	var err error
+	if bytes, err = json.MarshalIndent(v, "", "  "); err != nil {
+		return fmt.Sprintf("failed to marshal indent JSON in notification template: %v", err)
+	}
+	return string(bytes)
+}
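To see what the new helpers buy inside a template, here is a standalone sketch (not the repository's actual notification wiring) that registers a `toPrettyJson` function in a `FuncMap` and renders a stats-like object:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"text/template"
)

// Same shape as the helper added in the diff above.
func toPrettyJson(v interface{}) string {
	b, err := json.MarshalIndent(v, "", "  ")
	if err != nil {
		return fmt.Sprintf("failed to marshal indent JSON in notification template: %v", err)
	}
	return string(b)
}

func main() {
	tmpl := template.Must(
		template.New("body").
			Funcs(template.FuncMap{"toPrettyJson": toPrettyJson}).
			Parse("Stats:\n{{ toPrettyJson . }}\n"),
	)
	stats := map[string]int{"Stopped": 2, "StopErrors": 0}
	tmpl.Execute(os.Stdout, stats) // prints the object as indented JSON
}
```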
cmd/backup/script.go

@@ -5,8 +5,6 @@ package main

 import (
 	"bytes"
-	"context"
-	"errors"
 	"fmt"
 	"io"
 	"io/fs"

@@ -27,16 +25,11 @@ import (
 	"github.com/offen/docker-volume-backup/internal/storage/ssh"
 	"github.com/offen/docker-volume-backup/internal/storage/webdav"

-	"github.com/ProtonMail/go-crypto/openpgp"
+	openpgp "github.com/ProtonMail/go-crypto/openpgp/v2"
 	"github.com/containrrr/shoutrrr"
 	"github.com/containrrr/shoutrrr/pkg/router"
-	"github.com/docker/docker/api/types"
-	ctr "github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/client"
 	"github.com/leekchan/timeutil"
-	"github.com/offen/envconfig"
 	"github.com/otiai10/copy"
 	"golang.org/x/sync/errgroup"
 )

@@ -64,10 +57,10 @@ type script struct {
 // remote resources like the Docker engine or remote storage locations. All
 // reading from env vars or other configuration sources is expected to happen
 // in this method.
-func newScript() (*script, error) {
+func newScript(c *Config, envVars map[string]string) (*script, func() error, error) {
 	stdOut, logBuffer := buffer(os.Stdout)
 	s := &script{
-		c:      &Config{},
+		c:      c,
 		logger: slog.New(slog.NewTextHandler(stdOut, nil)),
 		stats: &Stats{
 			StartTime: time.Now(),

@@ -83,50 +76,49 @@ func newScript() (*script, error) {
 		},
 	}

+	unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
+	if err != nil {
+		return nil, noop, fmt.Errorf("runScript: error acquiring file lock: %w", err)
+	}
+
+	for key, value := range envVars {
+		currentVal, currentOk := os.LookupEnv(key)
+		defer func(currentKey, currentVal string, currentOk bool) {
+			if !currentOk {
+				_ = os.Unsetenv(currentKey)
+			} else {
+				_ = os.Setenv(currentKey, currentVal)
+			}
+			s.logger.Info(fmt.Sprintf("unset %v: %v", currentKey, currentVal))
+		}(key, currentVal, currentOk)
+
+		if err := os.Setenv(key, value); err != nil {
+			return nil, unlock, fmt.Errorf(
+				"Unexpected error overloading environment %s: %w",
+				s.c.BackupCronExpression,
+				err,
+			)
+		}
+		s.logger.Info(fmt.Sprintf("set %v: %v", key, value))
+	}
+
 	s.registerHook(hookLevelPlumbing, func(error) error {
 		s.stats.EndTime = time.Now()
 		s.stats.TookTime = s.stats.EndTime.Sub(s.stats.StartTime)
 		return nil
 	})

-	envconfig.Lookup = func(key string) (string, bool) {
-		value, okValue := os.LookupEnv(key)
-		location, okFile := os.LookupEnv(key + "_FILE")
-
-		switch {
-		case okValue && !okFile: // only value
-			return value, true
-		case !okValue && okFile: // only file
-			contents, err := os.ReadFile(location)
-			if err != nil {
-				s.must(fmt.Errorf("newScript: failed to read %s! Error: %s", location, err))
-				return "", false
-			}
-			return string(contents), true
-		case okValue && okFile: // both
-			s.must(fmt.Errorf("newScript: both %s and %s are set!", key, key+"_FILE"))
-			return "", false
-		default: // neither, ignore
-			return "", false
-		}
-	}
-
-	if err := envconfig.Process("", s.c); err != nil {
-		return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
-	}
-
 	s.file = path.Join("/tmp", s.c.BackupFilename)

 	tmplFileName, tErr := template.New("extension").Parse(s.file)
 	if tErr != nil {
-		return nil, fmt.Errorf("newScript: unable to parse backup file extension template: %w", tErr)
+		return nil, unlock, fmt.Errorf("newScript: unable to parse backup file extension template: %w", tErr)
 	}

 	var bf bytes.Buffer
 	if tErr := tmplFileName.Execute(&bf, map[string]string{
 		"Extension": fmt.Sprintf("tar.%s", s.c.BackupCompression),
 	}); tErr != nil {
-		return nil, fmt.Errorf("newScript: error executing backup file extension template: %w", tErr)
+		return nil, unlock, fmt.Errorf("newScript: error executing backup file extension template: %w", tErr)
 	}
 	s.file = bf.String()

@@ -137,14 +129,20 @@ func newScript() (*script, error) {
 	}
 	s.file = timeutil.Strftime(&s.stats.StartTime, s.file)

-	_, err := os.Stat("/var/run/docker.sock")
+	_, err = os.Stat("/var/run/docker.sock")
 	_, dockerHostSet := os.LookupEnv("DOCKER_HOST")
 	if !os.IsNotExist(err) || dockerHostSet {
 		cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
 		if err != nil {
-			return nil, fmt.Errorf("newScript: failed to create docker client")
+			return nil, unlock, fmt.Errorf("newScript: failed to create docker client")
 		}
 		s.cli = cli
+		s.registerHook(hookLevelPlumbing, func(err error) error {
+			if err := s.cli.Close(); err != nil {
+				return fmt.Errorf("newScript: failed to close docker client: %w", err)
+			}
+			return nil
+		})
 	}

 	logFunc := func(logType storage.LogLevel, context string, msg string, params ...any) {

@@ -174,7 +172,7 @@ func newScript() (*script, error) {
 		}
 		s3Backend, err := s3.NewStorageBackend(s3Config, logFunc)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: error creating s3 storage backend: %w", err)
+			return nil, unlock, fmt.Errorf("newScript: error creating s3 storage backend: %w", err)
 		}
 		s.storages = append(s.storages, s3Backend)
 	}

@@ -189,7 +187,7 @@ func newScript() (*script, error) {
 		}
 		webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: error creating webdav storage backend: %w", err)
+			return nil, unlock, fmt.Errorf("newScript: error creating webdav storage backend: %w", err)
 		}
 		s.storages = append(s.storages, webdavBackend)
 	}

@@ -206,7 +204,7 @@ func newScript() (*script, error) {
 		}
 		sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: error creating ssh storage backend: %w", err)
+			return nil, unlock, fmt.Errorf("newScript: error creating ssh storage backend: %w", err)
 		}
 		s.storages = append(s.storages, sshBackend)
 	}

@@ -230,7 +228,7 @@ func newScript() (*script, error) {
 		}
 		azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: error creating azure storage backend: %w", err)
+			return nil, unlock, fmt.Errorf("newScript: error creating azure storage backend: %w", err)
 		}
 		s.storages = append(s.storages, azureBackend)
 	}

@@ -247,7 +245,7 @@ func newScript() (*script, error) {
 		}
 		dropboxBackend, err := dropbox.NewStorageBackend(dropboxConfig, logFunc)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: error creating dropbox storage backend: %w", err)
+			return nil, unlock, fmt.Errorf("newScript: error creating dropbox storage backend: %w", err)
 		}
 		s.storages = append(s.storages, dropboxBackend)
 	}

@@ -273,14 +271,14 @@ func newScript() (*script, error) {

 	hookLevel, ok := hookLevels[s.c.NotificationLevel]
 	if !ok {
-		return nil, fmt.Errorf("newScript: unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel)
+		return nil, unlock, fmt.Errorf("newScript: unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel)
 	}
 	s.hookLevel = hookLevel

 	if len(s.c.NotificationURLs) > 0 {
 		sender, senderErr := shoutrrr.CreateSender(s.c.NotificationURLs...)
 		if senderErr != nil {
-			return nil, fmt.Errorf("newScript: error creating sender: %w", senderErr)
+			return nil, unlock, fmt.Errorf("newScript: error creating sender: %w", senderErr)
 		}
 		s.sender = sender

@@ -288,13 +286,13 @@ func newScript() (*script, error) {
 		tmpl.Funcs(templateHelpers)
 		tmpl, err = tmpl.Parse(defaultNotifications)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: unable to parse default notifications templates: %w", err)
+			return nil, unlock, fmt.Errorf("newScript: unable to parse default notifications templates: %w", err)
 		}

 		if fi, err := os.Stat("/etc/dockervolumebackup/notifications.d"); err == nil && fi.IsDir() {
 			tmpl, err = tmpl.ParseGlob("/etc/dockervolumebackup/notifications.d/*.*")
 			if err != nil {
-				return nil, fmt.Errorf("newScript: unable to parse user defined notifications templates: %w", err)
+				return nil, unlock, fmt.Errorf("newScript: unable to parse user defined notifications templates: %w", err)
 			}
 		}
 		s.template = tmpl

@@ -315,127 +313,7 @@ func newScript() (*script, error) {
 		})
 	}

-	return s, nil
-}
-
-// stopContainers stops all Docker containers that are marked as to being
-// stopped during the backup and returns a function that can be called to
-// restart everything that has been stopped.
-func (s *script) stopContainers() (func() error, error) {
-	if s.cli == nil {
-		return noop, nil
-	}
-
-	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
-	if err != nil {
-		return noop, fmt.Errorf("stopContainers: error querying for containers: %w", err)
-	}
-
-	containerLabel := fmt.Sprintf(
-		"docker-volume-backup.stop-during-backup=%s",
-		s.c.BackupStopContainerLabel,
-	)
-	containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
-		Filters: filters.NewArgs(filters.KeyValuePair{
-			Key:   "label",
-			Value: containerLabel,
-		}),
-	})
-
-	if err != nil {
-		return noop, fmt.Errorf("stopContainers: error querying for containers to stop: %w", err)
-	}
-
-	if len(containersToStop) == 0 {
-		return noop, nil
-	}
-
-	s.logger.Info(
-		fmt.Sprintf(
-			"Stopping %d container(s) labeled `%s` out of %d running container(s).",
-			len(containersToStop),
-			containerLabel,
-			len(allContainers),
-		),
-	)
-
-	var stoppedContainers []types.Container
-	var stopErrors []error
-	for _, container := range containersToStop {
-		if err := s.cli.ContainerStop(context.Background(), container.ID, ctr.StopOptions{}); err != nil {
-			stopErrors = append(stopErrors, err)
-		} else {
-			stoppedContainers = append(stoppedContainers, container)
-		}
-	}
-
-	var stopError error
-	if len(stopErrors) != 0 {
-		stopError = fmt.Errorf(
-			"stopContainers: %d error(s) stopping containers: %w",
-			len(stopErrors),
-			errors.Join(stopErrors...),
-		)
-	}
-
-	s.stats.Containers = ContainersStats{
-		All:     uint(len(allContainers)),
-		ToStop:  uint(len(containersToStop)),
-		Stopped: uint(len(stoppedContainers)),
-	}
-
-	return func() error {
-		servicesRequiringUpdate := map[string]struct{}{}
-
-		var restartErrors []error
-		for _, container := range stoppedContainers {
-			if swarmServiceName, ok := container.Labels["com.docker.swarm.service.name"]; ok {
-				servicesRequiringUpdate[swarmServiceName] = struct{}{}
-				continue
-			}
-			if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
-				restartErrors = append(restartErrors, err)
-			}
-		}
-
-		if len(servicesRequiringUpdate) != 0 {
-			services, _ := s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
-			for serviceName := range servicesRequiringUpdate {
-				var serviceMatch swarm.Service
-				for _, service := range services {
-					if service.Spec.Name == serviceName {
-						serviceMatch = service
-						break
-					}
-				}
-				if serviceMatch.ID == "" {
-					return fmt.Errorf("stopContainers: couldn't find service with name %s", serviceName)
-				}
-				serviceMatch.Spec.TaskTemplate.ForceUpdate += 1
-				if _, err := s.cli.ServiceUpdate(
-					context.Background(), serviceMatch.ID,
-					serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
-				); err != nil {
-					restartErrors = append(restartErrors, err)
-				}
-			}
-		}
-
-		if len(restartErrors) != 0 {
-			return fmt.Errorf(
-				"stopContainers: %d error(s) restarting containers and services: %w",
-				len(restartErrors),
-				errors.Join(restartErrors...),
-			)
-		}
-		s.logger.Info(
-			fmt.Sprintf(
-				"Restarted %d container(s) and the matching service(s).",
-				len(stoppedContainers),
-			),
-		)
-		return nil
-	}, stopError
+	return s, unlock, nil
 }

@@ -448,7 +326,7 @@ func (s *script) createArchive() error {
 			"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
 		)
 		s.logger.Warn(
-			"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the README for an upgrade guide.",
+			"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the documentation for an upgrade guide.",
 		)
 		backupSources = filepath.Join("/tmp", s.c.BackupSources)
 		// copy before compressing guard against a situation where backup folder's content are still growing.

@@ -540,7 +418,6 @@ func (s *script) encryptArchive() error {

 	_, name := path.Split(s.file)
 	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
-		IsBinary: true,
 		FileName: name,
 	}, nil)
 	if err != nil {

@@ -634,17 +511,6 @@ func (s *script) pruneBackups() error {
 	return nil
 }

-// must exits the script run prematurely in case the given error
-// is non-nil.
-func (s *script) must(err error) {
-	if err != nil {
-		s.logger.Error(
-			fmt.Sprintf("Fatal error running backup: %s", err),
-		)
-		panic(err)
-	}
-}
-
 // skipPrune returns true if the given backend name is contained in the
 // list of skipped backends.
 func skipPrune(name string, skippedBackends []string) bool {
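The refactored `newScript` returns its `unlock` function alongside the script, so even a failed construction hands the caller something safe to defer (`noop` or the real release). A sketch of that shape with a hypothetical `acquire` helper:

```go
package main

import (
	"fmt"
	"os"
)

var noop = func() error { return nil }

// acquire returns a resource together with its release function. On failure
// it returns noop so callers can defer the release unconditionally, mirroring
// how newScript hands its file lock back to runScript.
func acquire(path string) (*os.File, func() error, error) {
	f, err := os.Create(path)
	if err != nil {
		return nil, noop, fmt.Errorf("acquire: %w", err)
	}
	return f, func() error { return f.Close() }, nil
}

func main() {
	f, release, err := acquire("/tmp/dockervolumebackup.demo.lock")
	defer func() {
		if derr := release(); derr != nil {
			fmt.Println("release failed:", derr)
		}
	}()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("acquired", f.Name())
}
```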
cmd/backup/stats.go

@@ -17,6 +17,15 @@ type ContainersStats struct {
 	StopErrors uint
 }

+// ServicesStats contains info about Swarm services that have been
+// operated upon
+type ServicesStats struct {
+	All             uint
+	ToScaleDown     uint
+	ScaledDown      uint
+	ScaleDownErrors uint
+}
+
 // BackupFileStats stats about the created backup file
 type BackupFileStats struct {
 	Name string

@@ -40,6 +49,7 @@ type Stats struct {
 	LockedTime time.Duration
 	LogOutput  *bytes.Buffer
 	Containers ContainersStats
+	Services   ServicesStats
 	BackupFile BackupFileStats
 	Storages   map[string]StorageStats
 }
cmd/backup/stop_restart.go (new file, 338 lines)

@@ -0,0 +1,338 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/docker/cli/cli/command/service/progress"
+	"github.com/docker/docker/api/types"
+	ctr "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/client"
+)
+
+func scaleService(cli *client.Client, serviceID string, replicas uint64) ([]string, error) {
+	service, _, err := cli.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("scaleService: error inspecting service %s: %w", serviceID, err)
+	}
+	serviceMode := &service.Spec.Mode
+	switch {
+	case serviceMode.Replicated != nil:
+		serviceMode.Replicated.Replicas = &replicas
+	default:
+		return nil, fmt.Errorf("scaleService: service to be scaled %s has to be in replicated mode", service.Spec.Name)
+	}
+
+	response, err := cli.ServiceUpdate(context.Background(), service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("scaleService: error updating service: %w", err)
+	}
+
+	discardWriter := &noopWriteCloser{io.Discard}
+	if err := progress.ServiceProgress(context.Background(), cli, service.ID, discardWriter); err != nil {
+		return nil, err
+	}
+	return response.Warnings, nil
+}
+
+func awaitContainerCountForService(cli *client.Client, serviceID string, count int, timeoutAfter time.Duration) error {
+	poll := time.NewTicker(time.Second)
+	timeout := time.NewTimer(timeoutAfter)
+	defer timeout.Stop()
+	defer poll.Stop()
+
+	for {
+		select {
+		case <-timeout.C:
+			return fmt.Errorf(
+				"awaitContainerCount: timed out after waiting %s for service %s to reach desired container count of %d",
+				timeoutAfter,
+				serviceID,
+				count,
+			)
+		case <-poll.C:
+			containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
+				Filters: filters.NewArgs(filters.KeyValuePair{
+					Key:   "label",
+					Value: fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID),
+				}),
+			})
+			if err != nil {
+				return fmt.Errorf("awaitContainerCount: error listing containers: %w", err)
+			}
+			if len(containers) == count {
+				return nil
+			}
+		}
+	}
+}
+
+// stopContainersAndServices stops all Docker containers that are marked as to being
+// stopped during the backup and returns a function that can be called to
+// restart everything that has been stopped.
+func (s *script) stopContainersAndServices() (func() error, error) {
+	if s.cli == nil {
+		return noop, nil
+	}
+
+	dockerInfo, err := s.cli.Info(context.Background())
+	if err != nil {
+		return noop, fmt.Errorf("(*script).stopContainersAndServices: error getting docker info: %w", err)
+	}
+	isDockerSwarm := dockerInfo.Swarm.LocalNodeState != "inactive"
+
+	labelValue := s.c.BackupStopDuringBackupLabel
+	if s.c.BackupStopContainerLabel != "" {
+		s.logger.Warn(
+			"Using BACKUP_STOP_CONTAINER_LABEL has been deprecated and will be removed in the next major version.",
+		)
+		s.logger.Warn(
+			"Please use BACKUP_STOP_DURING_BACKUP_LABEL instead. Refer to the docs for an upgrade guide.",
+		)
+		if _, ok := os.LookupEnv("BACKUP_STOP_DURING_BACKUP_LABEL"); ok {
+			return noop, errors.New("(*script).stopContainersAndServices: both BACKUP_STOP_DURING_BACKUP_LABEL and BACKUP_STOP_CONTAINER_LABEL have been set, cannot continue")
+		}
+		labelValue = s.c.BackupStopContainerLabel
+	}
+
+	filterMatchLabel := fmt.Sprintf(
+		"docker-volume-backup.stop-during-backup=%s",
+		labelValue,
+	)
+
+	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
+	if err != nil {
+		return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers: %w", err)
+	}
+	containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+		Filters: filters.NewArgs(filters.KeyValuePair{
+			Key:   "label",
+			Value: filterMatchLabel,
+		}),
+	})
+	if err != nil {
+		return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers to stop: %w", err)
+	}
+
+	var allServices []swarm.Service
+	var servicesToScaleDown []handledSwarmService
+	if isDockerSwarm {
+		allServices, err = s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
+		if err != nil {
+			return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services: %w", err)
+		}
+		matchingServices, err := s.cli.ServiceList(context.Background(), types.ServiceListOptions{
+			Filters: filters.NewArgs(filters.KeyValuePair{
+				Key:   "label",
+				Value: filterMatchLabel,
+			}),
+			Status: true,
+		})
+		for _, s := range matchingServices {
+			servicesToScaleDown = append(servicesToScaleDown, handledSwarmService{
+				serviceID:           s.ID,
+				initialReplicaCount: *s.Spec.Mode.Replicated.Replicas,
+			})
+		}
+		if err != nil {
+			return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services to scale down: %w", err)
+		}
+	}
+
+	if len(containersToStop) == 0 && len(servicesToScaleDown) == 0 {
+		return noop, nil
+	}
+
+	if isDockerSwarm {
+		for _, container := range containersToStop {
+			if swarmServiceID, ok := container.Labels["com.docker.swarm.service.id"]; ok {
+				parentService, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, types.ServiceInspectOptions{})
+				if err != nil {
+					return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for parent service with ID %s: %w", swarmServiceID, err)
+				}
+				for label := range parentService.Spec.Labels {
+					if label == "docker-volume-backup.stop-during-backup" {
+						return noop, fmt.Errorf(
+							"(*script).stopContainersAndServices: container %s is labeled to stop but has parent service %s which is also labeled, cannot continue",
+							container.Names[0],
+							parentService.Spec.Name,
+						)
+					}
+				}
+			}
+		}
+	}
+
+	s.logger.Info(
+		fmt.Sprintf(
+			"Stopping %d out of %d running container(s) as they were labeled %s.",
+			len(containersToStop),
+			len(allContainers),
+			filterMatchLabel,
+		),
+	)
+	if isDockerSwarm {
+		s.logger.Info(
+			fmt.Sprintf(
+				"Scaling down %d out of %d active service(s) as they were labeled %s.",
+				len(servicesToScaleDown),
+				len(allServices),
+				filterMatchLabel,
+			),
+		)
+	}
+
+	var stoppedContainers []types.Container
+	var stopErrors []error
+	for _, container := range containersToStop {
+		if err := s.cli.ContainerStop(context.Background(), container.ID, ctr.StopOptions{}); err != nil {
+			stopErrors = append(stopErrors, err)
+		} else {
+			stoppedContainers = append(stoppedContainers, container)
+		}
+	}
+
+	var scaledDownServices []handledSwarmService
+	var scaleDownErrors concurrentSlice[error]
+	if isDockerSwarm {
+		wg := sync.WaitGroup{}
+		for _, svc := range servicesToScaleDown {
+			wg.Add(1)
+			go func(svc handledSwarmService) {
+				defer wg.Done()
+				warnings, err := scaleService(s.cli, svc.serviceID, 0)
+				if err != nil {
+					scaleDownErrors.append(err)
+					return
+				}
+				scaledDownServices = append(scaledDownServices, svc)
+				for _, warning := range warnings {
+					s.logger.Warn(
+						fmt.Sprintf("The Docker API returned a warning when scaling down service %s: %s", svc.serviceID, warning),
+					)
+				}
+				// progress.ServiceProgress returns too early, so we need to manually check
+				// whether all containers belonging to the service have actually been removed
+				if err := awaitContainerCountForService(s.cli, svc.serviceID, 0, s.c.BackupStopServiceTimeout); err != nil {
+					scaleDownErrors.append(err)
+				}
+			}(svc)
+		}
+		wg.Wait()
+	}
+
+	s.stats.Containers = ContainersStats{
+		All:        uint(len(allContainers)),
+		ToStop:     uint(len(containersToStop)),
+		Stopped:    uint(len(stoppedContainers)),
+		StopErrors: uint(len(stopErrors)),
+	}
+
+	s.stats.Services = ServicesStats{
+		All:             uint(len(allServices)),
+		ToScaleDown:     uint(len(servicesToScaleDown)),
+		ScaledDown:      uint(len(scaledDownServices)),
+		ScaleDownErrors: uint(len(scaleDownErrors.value())),
+	}
+
+	var initialErr error
+	allErrors := append(stopErrors, scaleDownErrors.value()...)
+	if len(allErrors) != 0 {
+		initialErr = fmt.Errorf(
+			"(*script).stopContainersAndServices: %d error(s) stopping containers: %w",
+			len(allErrors),
+			errors.Join(allErrors...),
+		)
+	}
+
+	return func() error {
+		var restartErrors []error
+		matchedServices := map[string]bool{}
+		for _, container := range stoppedContainers {
+			if swarmServiceID, ok := container.Labels["com.docker.swarm.service.id"]; ok && isDockerSwarm {
+				if _, ok := matchedServices[swarmServiceID]; ok {
+					continue
+				}
+				matchedServices[swarmServiceID] = true
+				// in case a container was part of a swarm service, the service requires to
+				// be force updated instead of restarting the container as it would otherwise
+				// remain in a "completed" state
+				service, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, types.ServiceInspectOptions{})
+				if err != nil {
+					restartErrors = append(
+						restartErrors,
+						fmt.Errorf("(*script).stopContainersAndServices: error looking up parent service: %w", err),
+					)
+					continue
+				}
+				service.Spec.TaskTemplate.ForceUpdate += 1
+				if _, err := s.cli.ServiceUpdate(
+					context.Background(), service.ID,
+					service.Version, service.Spec, types.ServiceUpdateOptions{},
+				); err != nil {
+					restartErrors = append(restartErrors, err)
+				}
+				continue
+			}
+
+			if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
+				restartErrors = append(restartErrors, err)
+			}
+		}
+
+		var scaleUpErrors concurrentSlice[error]
+		if isDockerSwarm {
+			wg := &sync.WaitGroup{}
+			for _, svc := range servicesToScaleDown {
+				wg.Add(1)
+				go func(svc handledSwarmService) {
+					defer wg.Done()
+					warnings, err := scaleService(s.cli, svc.serviceID, svc.initialReplicaCount)
+					if err != nil {
+						scaleDownErrors.append(err)
+						return
+					}
+					for _, warning := range warnings {
+						s.logger.Warn(
+							fmt.Sprintf("The Docker API returned a warning when scaling up service %s: %s", svc.serviceID, warning),
+						)
+					}
+				}(svc)
+			}
+			wg.Wait()
+		}
+
+		allErrors := append(restartErrors, scaleUpErrors.value()...)
+		if len(allErrors) != 0 {
+			return fmt.Errorf(
+				"(*script).stopContainersAndServices: %d error(s) restarting containers and services: %w",
+				len(allErrors),
+				errors.Join(allErrors...),
+			)
+		}
+
+		s.logger.Info(
+			fmt.Sprintf(
+				"Restarted %d container(s).",
+				len(stoppedContainers),
+			),
+		)
+		if isDockerSwarm {
+			s.logger.Info(
+				fmt.Sprintf(
+					"Scaled %d service(s) back up.",
+					len(scaledDownServices),
+				),
+			)
+		}
+
+		return nil
+	}, initialErr
+}
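`awaitContainerCountForService` above is a ticker-plus-timer polling loop, a shape that generalizes well. A sketch of the skeleton with a made-up `pollUntil` helper:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil checks cond once per second until it reports done or the timeout
// fires, using the same select structure as awaitContainerCountForService.
func pollUntil(cond func() (bool, error), timeoutAfter time.Duration) error {
	poll := time.NewTicker(time.Second)
	timeout := time.NewTimer(timeoutAfter)
	defer timeout.Stop()
	defer poll.Stop()

	for {
		select {
		case <-timeout.C:
			return errors.New("pollUntil: timed out waiting for condition")
		case <-poll.C:
			done, err := cond()
			if err != nil {
				return err
			}
			if done {
				return nil
			}
		}
	}
}

func main() {
	start := time.Now()
	err := pollUntil(func() (bool, error) {
		return time.Since(start) > 2*time.Second, nil
	}, 10*time.Second)
	fmt.Println(err) // <nil> after roughly three ticks
}
```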
cmd/backup/util.go

@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"sync"
 )

 var noop = func() error { return nil }

@@ -50,3 +51,31 @@ func (b *bufferingWriter) Write(p []byte) (n int, err error) {
 	}
 	return b.writer.Write(p)
 }
+
+type noopWriteCloser struct {
+	io.Writer
+}
+
+func (noopWriteCloser) Close() error {
+	return nil
+}
+
+type handledSwarmService struct {
+	serviceID           string
+	initialReplicaCount uint64
+}
+
+type concurrentSlice[T any] struct {
+	val []T
+	sync.Mutex
+}
+
+func (c *concurrentSlice[T]) append(v T) {
+	c.Lock()
+	defer c.Unlock()
+	c.val = append(c.val, v)
+}
+
+func (c *concurrentSlice[T]) value() []T {
+	return c.val
+}
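A usage sketch for the generic `concurrentSlice` (assuming it is compiled into the same package, as in `stop_restart.go`): goroutines append errors concurrently and the caller joins them afterwards.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	var errs concurrentSlice[error] // the type added in the diff above
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if i%2 == 0 {
				errs.append(fmt.Errorf("worker %d failed", i))
			}
		}(i)
	}
	wg.Wait()
	// Joins whatever the workers collected; safe because all appends are done.
	fmt.Println(errors.Join(errs.value()...))
}
```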
@@ -0,0 +1,19 @@
+---
+title: Replace deprecated BACKUP_STOP_CONTAINER_LABEL setting
+layout: default
+parent: How Tos
+nav_order: 19
+---
+
+# Replace deprecated `BACKUP_STOP_CONTAINER_LABEL` setting
+
+Version `v2.36.0` deprecated the `BACKUP_STOP_CONTAINER_LABEL` setting and renamed it `BACKUP_STOP_DURING_BACKUP_LABEL`, which is supposed to signal that this will stop both containers _and_ services.
+Migrating is done by renaming the key for your custom value:
+
+```diff
+env:
+-  BACKUP_STOP_CONTAINER_LABEL: database
++  BACKUP_STOP_DURING_BACKUP_LABEL: database
+```
+
+The old key will stay supported until the next major version, but logs a warning each time a backup is taken.
@@ -76,7 +76,7 @@ Configuration, data about the backup run and helper functions will be passed to

 Here is a list of all data passed to the template:

-* `Config`: this object holds the configuration that has been passed to the script. The field names are the name of the recognized environment variables converted in PascalCase. (e.g. `BACKUP_STOP_CONTAINER_LABEL` becomes `BackupStopContainerLabel`)
+* `Config`: this object holds the configuration that has been passed to the script. The field names are the name of the recognized environment variables converted in PascalCase. (e.g. `BACKUP_STOP_DURING_BACKUP_LABEL` becomes `BackupStopDuringBackupLabel`)
 * `Error`: the error that made the backup fail. Only available in the `title_failure` and `body_failure` templates
 * `Stats`: object that holds stats regarding script execution. In case of an unsuccessful run, some information may not be available.
   * `StartTime`: time when the script started execution

@@ -89,6 +89,11 @@ Here is a list of all data passed to the template:
     * `ToStop`: number of containers matched by the stop rule
     * `Stopped`: number of containers successfully stopped
     * `StopErrors`: number of containers that were unable to be stopped (equal to `ToStop - Stopped`)
+  * `Services`: object containing stats about the docker services (only populated when Docker is running in Swarm mode)
+    * `All`: total number of services
+    * `ToScaleDown`: number of services matched by the scale down rule
+    * `ScaledDown`: number of services successfully scaled down
+    * `ScaleDownErrors`: number of services that were unable to be scaled down (equal to `ToScaleDown - ScaledDown`)
 * `BackupFile`: object containing information about the backup file
   * `Name`: name of the backup file (e.g. `backup-2022-02-11T01-00-00.tar.gz`)
   * `FullPath`: full path of the backup file (e.g. `/archive/backup-2022-02-11T01-00-00.tar.gz`)

@@ -107,6 +112,8 @@ Some formatting and helper functions are also available:
 * `formatBytesBin`: formats an amount of bytes using powers of 1024 (e.g. `7055258` bytes will be `6.7 MiB`)
 * `formatBytesDec`: formats an amount of bytes using powers of 1000 (e.g. `7055258` bytes will be `7.1 MB`)
 * `env`: returns the value of the environment variable of the given key if set
+* `toJson`: converts the given object to JSON
+* `toPrettyJson`: converts the given object to pretty-printed JSON

 ## Special characters in notification URLs
@@ -7,11 +7,14 @@ nav_order: 1

 # Stop containers during backup

+{: .note }
+In case you are running Docker in Swarm mode, [dedicated documentation](./use-with-docker-swarm.html) on service and container restart applies.
+
 In many cases, it will be desirable to stop the services that are consuming the volume you want to backup in order to ensure data integrity.
 This image can automatically stop and restart containers and services.
 By default, any container that is labeled `docker-volume-backup.stop-during-backup=true` will be stopped before the backup is being taken and restarted once it has finished.

-In case you need more fine grained control about which containers should be stopped (e.g. when backing up multiple volumes on different schedules), you can set the `BACKUP_STOP_CONTAINER_LABEL` environment variable and then use the same value for labeling:
+In case you need more fine grained control about which containers should be stopped (e.g. when backing up multiple volumes on different schedules), you can set the `BACKUP_STOP_DURING_BACKUP_LABEL` environment variable and then use the same value for labeling:

 ```yml
 version: '3'

@@ -25,7 +28,7 @@ services:
   backup:
     image: offen/docker-volume-backup:v2
     environment:
-      BACKUP_STOP_CONTAINER_LABEL: service1
+      BACKUP_STOP_DURING_BACKUP_LABEL: service1
     volumes:
       - data:/backup/my-app-backup:ro
       - /var/run/docker.sock:/var/run/docker.sock:ro
@@ -13,5 +13,33 @@ If you are interfacing with Docker via TCP, set `DOCKER_HOST` to the correct URL
 DOCKER_HOST=tcp://docker_socket_proxy:2375
 ```

-In case you are using a socket proxy, it must support `GET` and `POST` requests to the `/containers` endpoint. If you are using Docker Swarm, it must also support the `/services` endpoint. If you are using pre/post backup commands, it must also support the `/exec` endpoint.
+If you do this as you seek to restrict access to the Docker socket, this tool is potentially calling the following Docker APIs:
+
+| API | When |
+|-|-|
+| `Info` | always |
+| `ContainerExecCreate` | running commands from `exec`-labels |
+| `ContainerExecAttach` | running commands from `exec`-labels |
+| `ContainerExecInspect` | running commands from `exec`-labels |
+| `ContainerList` | always |
+| `ServiceList` | Docker engine is running in Swarm mode |
+| `ServiceInspect` | Docker engine is running in Swarm mode |
+| `ServiceUpdate` | Docker engine is running in Swarm mode and `stop-during-backup` is used |
+| `ContainerStop` | `stop-during-backup` labels are applied to containers |
+| `ContainerStart` | `stop-during-backup` labels are applied to containers |
+
+---
+
+In case you are using [`docker-socket-proxy`][proxy], this means the following permissions are required:
+
+| Permission | When |
+|-|-|
+| INFO | always required |
+| CONTAINERS | always required |
+| POST | required when using `stop-during-backup` or `exec` labels |
+| EXEC | required when using `exec`-labeled commands |
+| SERVICES | required when Docker Engine is running in Swarm mode |
+| NODES | required when labeling services `stop-during-backup` |
+| TASKS | required when labeling services `stop-during-backup` |
+
+[proxy]: https://github.com/Tecnativa/docker-socket-proxy
@@ -7,12 +7,66 @@ nav_order: 13

# Use with Docker Swarm

By default, Docker Swarm automatically restarts stopped containers, even when they were stopped manually.
If you plan to have your containers / services stopped during backup, this means you need to apply the `on-failure` restart policy to your service definitions.
A restart policy of `always` is not compatible with this tool.

{: .note }
The mechanisms described on this page __only apply when Docker is running in [Swarm mode][swarm]__.

[swarm]: https://docs.docker.com/engine/swarm/

## Stopping containers during backup

When running Docker in Swarm mode, stopping and restarting containers during backup creation is supported in two ways.

{: .important }
Make sure you label your services and containers using only one of the described approaches.
In case the script encounters a container that is labeled and has a parent service that is also labeled, it will exit early.

### Scaling services down to zero before scaling back up

When labeling a service in the `deploy` section, the following strategy for stopping and restarting will be used:

- The service is scaled down to zero replicas
- The backup is created
- The service is scaled back up to the previous number of replicas

{: .note }
This approach will only work for services that are deployed in __replicated mode__.

Such a service definition could look like:

```yml
services:
  app:
    image: myorg/myimage:latest
    deploy:
      labels:
        - docker-volume-backup.stop-during-backup=true
      replicas: 2
```

### Stopping the containers

This approach bypasses the services, stops the containers directly, creates the backup, and then restarts the containers.
As Docker Swarm would usually try to instantly restart containers that are manually stopped, this approach only works when using the `on-failure` restart policy.
A restart policy of `always` is not compatible with this approach.

Such a service definition could look like:

```yml
services:
  app:
    image: myapp/myimage:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    deploy:
      replicas: 2
      restart_policy:
        condition: on-failure
```

---

## Memory limit considerations

When running in Swarm mode, it's also advised to set a hard memory limit on your service (~25MB should be enough in most cases, but if you back up large files above half a gigabyte or similar, you might have to raise this in case the backup exits with `Killed`):

```yml
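# The remainder of this example is cut off in this diff view; the service
# name and the values below are an assumed reconstruction based on the
# prose above, not the original snippet.
services:
  backup:
    image: offen/docker-volume-backup:v2
    deploy:
      resources:
        limits:
          # hard limit; raise this when large backups exit with `Killed`
          memory: 25M
```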
@@ -177,7 +177,7 @@ volumes:

## Backing up to Dropbox

See [Dropbox Setup](#setup-dropbox-storage-backend) on how to get the appropriate environment values.
See [Dropbox Setup](../how-tos/set-up-dropbox.md) on how to get the appropriate environment values.

```yml
version: '3'
@@ -352,7 +352,7 @@ services:
      AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
      AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
      # Label the container using the `data_1` volume as `docker-volume-backup.stop-during-backup=service1`
      BACKUP_STOP_CONTAINER_LABEL: service1
      BACKUP_STOP_DURING_BACKUP_LABEL: service1
    volumes:
      - data_1:/backup/data-1-backup:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
@@ -362,7 +362,7 @@ services:
      <<: *backup_environment
      # Label the container using the `data_2` volume as `docker-volume-backup.stop-during-backup=service2`
      BACKUP_CRON_EXPRESSION: "0 3 * * *"
      BACKUP_STOP_CONTAINER_LABEL: service2
      BACKUP_STOP_DURING_BACKUP_LABEL: service2
    volumes:
      - data_2:/backup/data-2-backup:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
@@ -23,9 +23,22 @@ You can populate below template according to your requirements and use it as you
```
########### BACKUP SCHEDULE

# Backups run on the given cron schedule in `busybox` flavor. If no
# value is set, `@daily` will be used. If you do not want the cron
# to ever run, use `0 0 5 31 2 ?`.

# A cron expression represents a set of times, using 5 or 6 space-separated fields.
#
# Field name   | Mandatory? | Allowed values  | Allowed special characters
# ----------   | ---------- | --------------  | --------------------------
# Seconds      | No         | 0-59            | * / , -
# Minutes      | Yes        | 0-59            | * / , -
# Hours        | Yes        | 0-23            | * / , -
# Day of month | Yes        | 1-31            | * / , - ?
# Month        | Yes        | 1-12 or JAN-DEC | * / , -
# Day of week  | Yes        | 0-6 or SUN-SAT  | * / , - ?
#
# Month and Day-of-week field values are case insensitive.
# "SUN", "Sun", and "sun" are equally accepted.
# If no value is set, `@daily` will be used.
# If you do not want the cron to ever run, use `0 0 5 31 2 ?`.

# BACKUP_CRON_EXPRESSION="0 2 * * *"

@@ -316,15 +329,22 @@ You can populate below template according to your requirements and use it as you

# GPG_PASSPHRASE="<xxx>"

########### STOPPING CONTAINERS DURING BACKUP
########### STOPPING CONTAINERS AND SERVICES DURING BACKUP

# Containers can be stopped by applying a
# `docker-volume-backup.stop-during-backup` label. By default, all containers
# that are labeled with `true` will be stopped. If you need more fine grained
# control (e.g. when running multiple containers based on this image), you can
# override this default by specifying a different value here.
# Containers or services can be stopped by applying a
# `docker-volume-backup.stop-during-backup` label. By default, all containers and
# services that are labeled with `true` will be stopped. If you need more fine
# grained control (e.g. when running multiple containers based on this image),
# you can override this default by specifying a different value here.
# BACKUP_STOP_DURING_BACKUP_LABEL="service1"

# BACKUP_STOP_CONTAINER_LABEL="service1"
# When trying to scale down Docker Swarm services, give up after
# the specified amount of time in case the service has not converged yet.
# In case you need to adjust this timeout, supply a duration
# value as per https://pkg.go.dev/time#ParseDuration to `BACKUP_STOP_SERVICE_TIMEOUT`.
# Defaults to 5 minutes.

# BACKUP_STOP_SERVICE_TIMEOUT="5m"

########### EXECUTING COMMANDS IN CONTAINERS PRE/POST BACKUP
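Taken together, a minimal sketch of these settings in Compose terms; the values are illustrative rather than defaults:

```yml
services:
  backup:
    image: offen/docker-volume-backup:v2
    environment:
      BACKUP_CRON_EXPRESSION: "0 2 * * *"
      # only stop containers/services labeled with this value
      BACKUP_STOP_DURING_BACKUP_LABEL: service1
      # wait up to 10 minutes for Swarm services to converge
      BACKUP_STOP_SERVICE_TIMEOUT: 10m
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
```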
@@ -1,26 +0,0 @@
#!/bin/sh

# Copyright 2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: MPL-2.0

set -e

if [ ! -d "/etc/dockervolumebackup/conf.d" ]; then
  BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"

  echo "Installing cron.d entry with expression $BACKUP_CRON_EXPRESSION."
  echo "$BACKUP_CRON_EXPRESSION backup 2>&1" | crontab -
else
  echo "/etc/dockervolumebackup/conf.d was found, using configuration files from this directory."

  crontab -r && crontab /dev/null
  for file in /etc/dockervolumebackup/conf.d/*; do
    source $file
    BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
    echo "Appending cron.d entry with expression $BACKUP_CRON_EXPRESSION and configuration file $file"
    (crontab -l; echo "$BACKUP_CRON_EXPRESSION /bin/sh -c 'set -a; source $file; set +a && backup' 2>&1") | crontab -
  done
fi

echo "Starting cron in foreground."
crond -f -d 8
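The deleted entrypoint installed one crontab entry per file found in `/etc/dockervolumebackup/conf.d`. A sketch of how such a configuration directory is mounted, assuming the directory-based configuration is still honored by the replacement scheduler (the configuration-file tests further down suggest it is):

```yml
services:
  backup:
    image: offen/docker-volume-backup:v2
    volumes:
      # each file in ./conf.d defines one schedule plus its settings
      - ./conf.d:/etc/dockervolumebackup/conf.d
      - /var/run/docker.sock:/var/run/docker.sock:ro
```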
43
go.mod
@@ -3,38 +3,43 @@ module github.com/offen/docker-volume-backup
go 1.21

require (
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
	github.com/containrrr/shoutrrr v0.7.1
	github.com/cosiner/argv v0.1.0
	github.com/docker/cli v24.0.9+incompatible
	github.com/docker/docker v24.0.7+incompatible
	github.com/gofrs/flock v0.8.1
	github.com/klauspost/compress v1.17.2
	github.com/joho/godotenv v1.5.1
	github.com/klauspost/compress v1.17.6
	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
	github.com/minio/minio-go/v7 v7.0.63
	github.com/minio/minio-go/v7 v7.0.67
	github.com/offen/envconfig v1.5.0
	github.com/otiai10/copy v1.14.0
	github.com/pkg/sftp v1.13.6
	github.com/robfig/cron/v3 v3.0.1
	github.com/studio-b12/gowebdav v0.9.0
	golang.org/x/crypto v0.14.0
	golang.org/x/oauth2 v0.13.0
	golang.org/x/sync v0.4.0
	golang.org/x/crypto v0.19.0
	golang.org/x/oauth2 v0.17.0
	golang.org/x/sync v0.6.0
)

require (
	github.com/cloudflare/circl v1.3.3 // indirect
	github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
	github.com/cloudflare/circl v1.3.7 // indirect
	github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/protobuf v1.31.0 // indirect
)

require (
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
	github.com/Microsoft/go-winio v0.5.2 // indirect
	github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95
	github.com/ProtonMail/go-crypto v1.1.0-alpha.0
	github.com/docker/distribution v2.8.2+incompatible // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-units v0.4.0 // indirect
@@ -42,9 +47,9 @@ require (
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/fatih/color v1.13.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/google/uuid v1.3.1 // indirect
	github.com/google/uuid v1.5.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
	github.com/klauspost/pgzip v1.2.6
	github.com/kr/fs v0.1.0 // indirect
	github.com/kylelemons/godebug v1.1.0 // indirect
@@ -59,13 +64,13 @@ require (
	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/rs/xid v1.5.0 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
	golang.org/x/net v0.17.0 // indirect
	golang.org/x/sys v0.13.0 // indirect
	golang.org/x/text v0.13.0 // indirect
	golang.org/x/net v0.21.0 // indirect
	golang.org/x/sys v0.17.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gotest.tools/v3 v3.0.3 // indirect
113
go.sum
@@ -181,28 +181,28 @@ cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuW
cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0=
github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -220,7 +220,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -230,8 +229,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -254,10 +253,10 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/cli v24.0.9+incompatible h1:OxbimnP/z+qVjDLpq9wbeFU3Nc30XhSe+LkwYQisD50=
github.com/docker/cli v24.0.9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY=
github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -307,8 +306,8 @@ github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -385,8 +384,8 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
@@ -444,6 +443,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jarcoal/httpmock v1.2.0 h1:gSvTxxFR/MEMfsGrvRbdfpRUMBStovlSRLw0Ep1bwwc=
github.com/jarcoal/httpmock v1.2.0/go.mod h1:oCoTsnAz4+UoOUIf5lJOWV2QQIW5UoeUI6aM2YnWAZk=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -457,11 +458,11 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -503,8 +504,8 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.63 h1:GbZ2oCvaUdgT5640WJOpyDhhDxvknAJU2/T3yurwcbQ=
github.com/minio/minio-go/v7 v7.0.63/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4=
github.com/minio/minio-go/v7 v7.0.67 h1:BeBvZWAS+kRJm1vGTMJYVjKUNoo0FoEt/wUWdUtfmh8=
github.com/minio/minio-go/v7 v7.0.67/go.mod h1:+UXocnUeZ3wHvVh5s95gcrA4YjMIbccT6ubB+1m054A=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -563,8 +564,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -594,6 +595,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
@@ -631,8 +634,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/studio-b12/gowebdav v0.9.0 h1:1j1sc9gQnNxbXXM4M/CebPOX4aXYtr7MojAVcN4dHjU=
github.com/studio-b12/gowebdav v0.9.0/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE=
github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
@@ -675,10 +679,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -717,7 +719,6 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -776,11 +777,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -805,8 +803,8 @@ golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7Lm
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -822,8 +820,8 @@ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -889,7 +887,6 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -919,20 +916,14 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -943,10 +934,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1012,7 +1001,6 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1259,6 +1247,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
@@ -81,7 +81,11 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
		return nil, err
	}

	sftpClient, err := sftp.NewClient(sshClient)
	sftpClient, err := sftp.NewClient(sshClient,
		sftp.UseConcurrentReads(true),
		sftp.UseConcurrentWrites(true),
		sftp.MaxConcurrentRequestsPerFile(64),
	)
	if err != nil {
		return nil, fmt.Errorf("NewStorageBackend: error creating sftp client: %w", err)
	}
@@ -117,7 +121,7 @@ func (b *sshStorage) Copy(file string) error {
	}
	defer destination.Close()

	chunk := make([]byte, 1000000)
	chunk := make([]byte, 1e9)
	for {
		num, err := source.Read(chunk)
		if err == io.EOF {
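The change above only tunes the SFTP client internals; selecting the SSH storage backend itself remains a configuration concern. A hedged sketch, assuming the project's `SSH_*` configuration keys and illustrative host values:

```yml
services:
  backup:
    image: offen/docker-volume-backup:v2
    environment:
      SSH_HOST_NAME: backup-host.example.com
      SSH_PORT: '2222'
      SSH_USER: backup
      SSH_REMOTE_PATH: /srv/backups
    volumes:
      - app_data:/backup/app_data:ro
```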
28
test/collision/docker-compose.yml
Normal file
@@ -0,0 +1,28 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

version: '3.8'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    environment:
      BACKUP_FILENAME: test.tar.gz
    volumes:
      - offen_data:/backup/offen_data:ro
      - ${LOCAL_DIR:-./local}:/archive
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    deploy:
      labels:
        - docker-volume-backup.stop-during-backup=true
      replicas: 2
    volumes:
      - offen_data:/var/opt/offen

volumes:
  offen_data:
34
test/collision/run.sh
Executable file
@@ -0,0 +1,34 @@
#!/bin/sh

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

export LOCAL_DIR=$(mktemp -d)

docker swarm init

docker stack deploy --compose-file=docker-compose.yml test_stack

while [ -z $(docker ps -q -f name=backup) ]; do
  info "Backup container not ready yet. Retrying."
  sleep 1
done

sleep 20

set +e
docker exec $(docker ps -q -f name=backup) backup
if [ $? = "0" ]; then
  fail "Expected script to exit with error code."
fi

if [ -f "${LOCAL_DIR}/test.tar.gz" ]; then
  fail "Found backup file that should not have been created."
fi

expect_running_containers "3"

pass "Script did not perform backup as there was a label collision."
@@ -1,2 +1,2 @@
BACKUP_FILENAME="conf.tar.gz"
NAME="conf"
BACKUP_CRON_EXPRESSION="*/1 * * * *"

@@ -1,2 +1,2 @@
BACKUP_FILENAME="other.tar.gz"
NAME="other"
BACKUP_CRON_EXPRESSION="*/1 * * * *"

@@ -1,2 +1,2 @@
BACKUP_FILENAME="never.tar.gz"
NAME="never"
BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?"

@@ -4,6 +4,9 @@ services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_FILENAME: $$NAME.tar.gz
      BACKUP_FILENAME_EXPAND: 'true'
    volumes:
      - ${LOCAL_DIR:-./local}:/archive
      - app_data:/backup/app_data:ro

@@ -13,6 +13,8 @@ docker compose up -d --quiet-pull
# sleep until a backup is guaranteed to have happened on the 1 minute schedule
sleep 100

docker compose logs backup

if [ ! -f "$LOCAL_DIR/conf.tar.gz" ]; then
  fail "Config from file was not used."
fi
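The compose change above relies on `BACKUP_FILENAME_EXPAND` to substitute `$NAME` from each configuration file at run time; the doubled `$$` keeps Compose itself from interpolating the variable. A sketch of the pair of settings involved:

```yml
services:
  backup:
    image: offen/docker-volume-backup:v2
    environment:
      # expanded by the tool at runtime, not by Compose
      BACKUP_FILENAME: $$NAME.tar.gz
      BACKUP_FILENAME_EXPAND: 'true'
```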
23
test/lock/docker-compose.yml
Normal file
@@ -0,0 +1,23 @@
version: '3'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    environment:
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: '7'
    volumes:
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock
      - ${LOCAL_DIR:-./local}:/archive

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
34
test/lock/run.sh
Executable file
@@ -0,0 +1,34 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

export LOCAL_DIR=$(mktemp -d)

docker compose up -d --quiet-pull
sleep 5

ec=0

docker compose exec -e BACKUP_RETENTION_DAYS=7 -e BACKUP_FILENAME=test.tar.gz backup backup & \
  { set +e; sleep 0.1; docker compose exec -e BACKUP_FILENAME=test2.tar.gz -e LOCK_TIMEOUT=1s backup backup; ec=$?;}

if [ "$ec" = "0" ]; then
  fail "Subsequent invocation exited 0"
fi
pass "Subsequent invocation did not exit 0"

sleep 5

if [ ! -f "${LOCAL_DIR}/test.tar.gz" ]; then
  fail "Could not find expected tar file"
fi
pass "Found expected tar file"

if [ -f "${LOCAL_DIR}/test2.tar.gz" ]; then
  fail "Subsequent invocation was expected to fail but created archive"
fi
pass "Subsequent invocation did not create archive"
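This test exercises the file lock that keeps concurrent invocations from running the same backup twice. A sketch of tuning the corresponding timeout, assuming `LOCK_TIMEOUT` accepts a Go duration value as used in the script above:

```yml
services:
  backup:
    image: offen/docker-volume-backup:v2
    environment:
      # give up after one minute when another run still holds the lock
      LOCK_TIMEOUT: 1m
```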
40
test/proxy/docker-compose.swarm.yml
Normal file
@@ -0,0 +1,40 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

version: '3.8'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    environment:
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      DOCKER_HOST: tcp://docker_socket_proxy:2375
    volumes:
      - pg_data:/backup/pg_data:ro
      - ${LOCAL_DIR:-local}:/archive

  docker_socket_proxy:
    image: tecnativa/docker-socket-proxy:0.1
    environment:
      INFO: ${ALLOW_INFO:-1}
      CONTAINERS: ${ALLOW_CONTAINERS:-1}
      SERVICES: ${ALLOW_SERVICES:-1}
      POST: ${ALLOW_POST:-1}
      TASKS: ${ALLOW_TASKS:-1}
      NODES: ${ALLOW_NODES:-1}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock

  pg:
    image: postgres:14-alpine
    environment:
      POSTGRES_PASSWORD: example
    volumes:
      - pg_data:/var/lib/postgresql/data
    deploy:
      labels:
        - docker-volume-backup.stop-during-backup=true

volumes:
  pg_data:
36
test/proxy/docker-compose.yml
Normal file
@@ -0,0 +1,36 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

version: '3.8'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    environment:
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      DOCKER_HOST: tcp://docker_socket_proxy:2375
    volumes:
      - pg_data:/backup/pg_data:ro
      - ${LOCAL_DIR:-local}:/archive

  docker_socket_proxy:
    image: tecnativa/docker-socket-proxy:0.1
    environment:
      INFO: ${ALLOW_INFO:-1}
      CONTAINERS: ${ALLOW_CONTAINERS:-1}
      POST: ${ALLOW_POST:-1}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock

  pg:
    image: postgres:14-alpine
    environment:
      POSTGRES_PASSWORD: example
    volumes:
      - pg_data:/var/lib/postgresql/data
    labels:
      - docker-volume-backup.stop-during-backup=true

volumes:
  pg_data:
76
test/proxy/run.sh
Executable file
@@ -0,0 +1,76 @@
#!/bin/sh

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

export LOCAL_DIR=$(mktemp -d)

docker compose up -d --quiet-pull
sleep 5

# The default configuration in docker-compose.yml should
# successfully create a backup.
docker compose exec backup backup

sleep 5

expect_running_containers "3"

if [ ! -f "$LOCAL_DIR/test.tar.gz" ]; then
  fail "Archive was not created"
fi
pass "Found relevant archive file."

# Disabling POST should make the backup run fail
ALLOW_POST="0" docker compose up -d
sleep 5

set +e
docker compose exec backup backup
if [ $? = "0" ]; then
  fail "Expected invocation to exit non-zero."
fi
set -e
pass "Invocation exited non-zero."

docker compose down --volumes

# Next, the test is run against a Swarm setup

docker swarm init

export LOCAL_DIR=$(mktemp -d)

docker stack deploy --compose-file=docker-compose.swarm.yml test_stack

sleep 20

# The default configuration in docker-compose.swarm.yml should
# successfully create a backup in Swarm mode.
docker exec $(docker ps -q -f name=backup) backup

if [ ! -f "$LOCAL_DIR/test.tar.gz" ]; then
  fail "Archive was not created"
fi

pass "Found relevant archive file."

sleep 5
expect_running_containers "3"

# Disabling POST should make the backup run fail
ALLOW_POST="0" docker stack deploy --compose-file=docker-compose.swarm.yml test_stack

sleep 20

set +e
docker exec $(docker ps -q -f name=backup) backup
if [ $? = "0" ]; then
  fail "Expected invocation to exit non-zero."
fi
set -e

pass "Invocation exited non-zero."
57
test/services/docker-compose.yml
Normal file
@@ -0,0 +1,57 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense

version: '3.8'

services:
  minio:
    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
    environment:
      MINIO_ROOT_USER: test
      MINIO_ROOT_PASSWORD: test
      MINIO_ACCESS_KEY: test
      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
    volumes:
      - backup_data:/data

  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    depends_on:
      - minio
    environment:
      AWS_ACCESS_KEY_ID: test
      AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
      AWS_ENDPOINT: minio:9000
      AWS_ENDPOINT_PROTO: http
      AWS_S3_BUCKET_NAME: backup
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: 7
      BACKUP_PRUNING_LEEWAY: 5s
    volumes:
      - pg_data:/backup/pg_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    deploy:
      labels:
        - docker-volume-backup.stop-during-backup=true
      replicas: 2

  pg:
    image: postgres:14-alpine
    environment:
      POSTGRES_PASSWORD: example
    volumes:
      - pg_data:/var/lib/postgresql/data
    deploy:
      labels:
        - docker-volume-backup.stop-during-backup=true

volumes:
  backup_data:
    name: backup_data
  pg_data:
    name: pg_data
29
test/services/run.sh
Executable file
@@ -0,0 +1,29 @@
#!/bin/sh

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

docker swarm init

docker stack deploy --compose-file=docker-compose.yml test_stack

while [ -z $(docker ps -q -f name=backup) ]; do
  info "Backup container not ready yet. Retrying."
  sleep 1
done

sleep 20

docker exec $(docker ps -q -f name=backup) backup

docker run --rm \
  -v backup_data:/data alpine \
  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'

pass "Found relevant files in untared backup."

sleep 5
expect_running_containers "5"
@@ -56,7 +56,7 @@ for dir in $(find $find_args | sort); do
  docker run $docker_run_args offen/docker-volume-backup:test-sandbox

  retry_counter=0
  until docker exec $sandbox /bin/sh -c 'docker info' > /dev/null 2>&1; do
  until timeout 5 docker exec $sandbox /bin/sh -c 'docker info' > /dev/null 2>&1; do
    if [ $retry_counter -gt 20 ]; then
      echo "Gave up waiting for Docker daemon to become ready after 20 attempts"
      exit 1