Compare commits


14 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Frederik Ring | d0f3f41fe7 | Add additional check if all containers have been removed | 2024-01-27 15:37:31 +01:00 |
| Frederik Ring | ba095ea753 | Document script behavior on label collision | 2024-01-27 13:50:56 +01:00 |
| Frederik Ring | 6d0e96ec40 | Check whether container and service labels collide | 2024-01-27 13:08:06 +01:00 |
| Frederik Ring | a430772e29 | Log warnings from Docker when updating services | 2024-01-27 12:32:06 +01:00 |
| Frederik Ring | 4e38760e5a | Do not rely on PreviousSpec for storing desired replica count | 2024-01-27 12:23:30 +01:00 |
| Frederik Ring | 6a49a53bc3 | Document services stats | 2024-01-26 21:23:22 +01:00 |
| Frederik Ring | 35e6d01c93 | Downgrade Docker CLI to match client | 2024-01-26 20:55:17 +01:00 |
| Frederik Ring | 1ee0b43294 | Document scale-up/down approach in docs | 2024-01-26 20:25:59 +01:00 |
| Frederik Ring | c337b35a06 | Clean up error and log messages | 2024-01-26 20:01:45 +01:00 |
| Frederik Ring | 8a625b4a5a | In test, label both services | 2024-01-26 16:36:42 +01:00 |
| Frederik Ring | e3062a4df7 | Use progress tool from Docker CLI | 2024-01-26 15:59:30 +01:00 |
| Frederik Ring | 5de5046f19 | Scale services back up | 2024-01-25 20:14:15 +01:00 |
| Frederik Ring | 27017e69d0 | Try scaling down services | 2024-01-25 20:06:29 +01:00 |
| Frederik Ring | 4b79a05971 | Query for labeled services as well | 2024-01-25 19:46:52 +01:00 |
24 changed files with 369 additions and 880 deletions


@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Setup Ruby
uses: ruby/setup-ruby@v1
with:


@@ -15,8 +15,8 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.21'
cache: false


@@ -13,7 +13,7 @@ jobs:
contents: read
steps:
- name: Check out the repo
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2


@@ -10,7 +10,7 @@ jobs:
test:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2


@@ -18,4 +18,4 @@ RUN apk add --no-cache ca-certificates
COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
COPY --chmod=755 ./entrypoint.sh /root/
ENTRYPOINT ["/usr/bin/backup", "-foreground"]
ENTRYPOINT ["/root/entrypoint.sh"]


@@ -34,13 +34,10 @@ type Config struct {
BackupFilenameExpand bool `split_words:"true"`
BackupLatestSymlink string `split_words:"true"`
BackupArchive string `split_words:"true" default:"/archive"`
BackupCronExpression string `split_words:"true" default:"@daily"`
BackupRetentionDays int32 `split_words:"true" default:"-1"`
BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
BackupPruningPrefix string `split_words:"true"`
BackupStopContainerLabel string `split_words:"true"`
BackupStopDuringBackupLabel string `split_words:"true" default:"true"`
BackupStopServiceTimeout time.Duration `split_words:"true" default:"5m"`
BackupStopContainerLabel string `split_words:"true" default:"true"`
BackupFromSnapshot bool `split_words:"true"`
BackupExcludeRegexp RegexpDecoder `split_words:"true"`
BackupSkipBackendsFromPrune []string `split_words:"true"`
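As an aside for readers unfamiliar with the tag syntax: these fields are populated by `github.com/offen/envconfig`. A minimal, hypothetical sketch (the `miniConfig` struct is made up for illustration) of how `split_words` and `default` map environment variables to struct fields:

```go
package main

import (
	"fmt"

	"github.com/offen/envconfig"
)

type miniConfig struct {
	// split_words makes envconfig look up BACKUP_RETENTION_DAYS
	// instead of BACKUPRETENTIONDAYS; default applies when unset.
	BackupRetentionDays int32 `split_words:"true" default:"-1"`
}

func main() {
	c := &miniConfig{}
	if err := envconfig.Process("", c); err != nil {
		panic(err)
	}
	fmt.Println(c.BackupRetentionDays) // -1 unless BACKUP_RETENTION_DAYS is set
}
```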


@@ -1,81 +0,0 @@
// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0
package main
import (
"fmt"
"os"
"path/filepath"
"github.com/joho/godotenv"
"github.com/offen/envconfig"
)
// envProxy is a function that mimics os.LookupEnv but can read values from any other source
type envProxy func(string) (string, bool)
func loadConfig(lookup envProxy) (*Config, error) {
envconfig.Lookup = func(key string) (string, bool) {
value, okValue := lookup(key)
location, okFile := lookup(key + "_FILE")
switch {
case okValue && !okFile: // only value
return value, true
case !okValue && okFile: // only file
contents, err := os.ReadFile(location)
if err != nil {
return "", false
}
return string(contents), true
case okValue && okFile: // both
return "", false
default: // neither, ignore
return "", false
}
}
var c = &Config{}
if err := envconfig.Process("", c); err != nil {
return nil, fmt.Errorf("failed to process configuration values, error: %w", err)
}
return c, nil
}
func loadEnvVars() (*Config, error) {
return loadConfig(os.LookupEnv)
}
func loadEnvFiles(directory string) ([]*Config, error) {
items, err := os.ReadDir(directory)
if err != nil {
if os.IsNotExist(err) {
return nil, err
}
return nil, fmt.Errorf("failed to read files from env directory, error: %w", err)
}
var cs = make([]*Config, 0)
for _, item := range items {
if !item.IsDir() {
p := filepath.Join(directory, item.Name())
envFile, err := godotenv.Read(p)
if err != nil {
return nil, fmt.Errorf("error reading config file %s, error: %w", p, err)
}
lookup := func(key string) (string, bool) {
val, ok := envFile[key]
return val, ok
}
c, err := loadConfig(lookup)
if err != nil {
return nil, fmt.Errorf("error loading config from file %s, error: %w", p, err)
}
cs = append(cs, c)
}
}
return cs, nil
}
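Since `envProxy` only mimics `os.LookupEnv`, the loader above can be driven from any key-value source. A minimal sketch, assuming the `Config` and `loadConfig` definitions from this file (the map contents are made up):

```go
// exampleLoadConfig feeds loadConfig from an in-memory map instead of
// the process environment, which is exactly what loadEnvFiles does
// with the parsed contents of each env file.
func exampleLoadConfig() (*Config, error) {
	env := map[string]string{
		"BACKUP_FILENAME":        "test.tar.gz",
		"BACKUP_CRON_EXPRESSION": "0 2 * * *",
	}
	return loadConfig(func(key string) (string, bool) {
		value, ok := env[key]
		return value, ok
	})
}
```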


@@ -4,53 +4,31 @@
package main
import (
"flag"
"fmt"
"log/slog"
"os"
"os/signal"
"syscall"
"github.com/robfig/cron/v3"
)
func runScript(c *Config) (ret error) {
s, err := newScript(c)
func main() {
s, err := newScript()
if err != nil {
return err
panic(err)
}
unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
if err != nil {
return err
}
defer func() {
err = unlock()
if err != nil {
ret = err
}
}()
defer s.must(unlock())
s.must(err)
defer func() {
if pArg := recover(); pArg != nil {
if err, ok := pArg.(error); ok {
s.logger.Error(
fmt.Sprintf("Executing the script encountered a panic: %v", err),
)
if hookErr := s.runHooks(err); hookErr != nil {
s.logger.Error(
fmt.Sprintf("An error occurred calling the registered hooks: %s", hookErr),
)
}
ret = err
} else {
s.logger.Error(
fmt.Sprintf("Executing the script encountered an unrecoverable panic: %v", err),
)
panic(pArg)
os.Exit(1)
}
panic(pArg)
}
if err := s.runHooks(nil); err != nil {
@@ -60,18 +38,18 @@ func runScript(c *Config) (ret error) {
err,
),
)
ret = err
os.Exit(1)
}
s.logger.Info("Finished running backup tasks.")
}()
s.must(s.withLabeledCommands(lifecyclePhaseArchive, func() error {
restartContainersAndServices, err := s.stopContainersAndServices()
restartContainers, err := s.stopContainersAndServices()
// The mechanism for restarting containers does not use hooks, as restarting
// should happen as soon as possible (i.e. before uploading backups or
// similar).
defer func() {
s.must(restartContainersAndServices())
s.must(restartContainers())
}()
if err != nil {
return err
@@ -82,90 +60,4 @@ func runScript(c *Config) (ret error) {
s.must(s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)())
s.must(s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)())
s.must(s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)())
return nil
}
func runInForeground() error {
cr := cron.New(
cron.WithParser(
cron.NewParser(
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
),
),
)
addJob := func(c *Config) error {
_, err := cr.AddFunc(c.BackupCronExpression, func() {
err := runScript(c)
if err != nil {
slog.Error("unexpected error during backup", "error", err)
}
})
return err
}
cs, err := loadEnvFiles("/etc/dockervolumebackup/conf.d")
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("could not load config from environment files, error: %w", err)
}
c, err := loadEnvVars()
if err != nil {
return fmt.Errorf("could not load config from environment variables")
} else {
err = addJob(c)
if err != nil {
return fmt.Errorf("could not add cron job, error: %w", err)
}
}
} else {
for _, c := range cs {
err = addJob(c)
if err != nil {
return fmt.Errorf("could not add cron job, error: %w", err)
}
}
}
var quit = make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT)
cr.Start()
<-quit
ctx := cr.Stop()
<-ctx.Done()
return nil
}
func runAsCommand() error {
c, err := loadEnvVars()
if err != nil {
return fmt.Errorf("could not load config from environment variables, error: %w", err)
}
err = runScript(c)
if err != nil {
return fmt.Errorf("unexpected error during backup, error: %w", err)
}
return nil
}
func main() {
serve := flag.Bool("foreground", false, "run the tool in the foreground")
flag.Parse()
var err error
if *serve {
err = runInForeground()
} else {
err = runAsCommand()
}
if err != nil {
slog.Error("ran into an issue during execution", "error", err)
os.Exit(1)
}
}


@@ -5,6 +5,8 @@ package main
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/fs"
@@ -28,8 +30,14 @@ import (
openpgp "github.com/ProtonMail/go-crypto/openpgp/v2"
"github.com/containrrr/shoutrrr"
"github.com/containrrr/shoutrrr/pkg/router"
"github.com/docker/cli/cli/command/service/progress"
"github.com/docker/docker/api/types"
ctr "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/leekchan/timeutil"
"github.com/offen/envconfig"
"github.com/otiai10/copy"
"golang.org/x/sync/errgroup"
)
@@ -57,10 +65,10 @@ type script struct {
// remote resources like the Docker engine or remote storage locations. All
// reading from env vars or other configuration sources is expected to happen
// in this method.
func newScript(c *Config) (*script, error) {
func newScript() (*script, error) {
stdOut, logBuffer := buffer(os.Stdout)
s := &script{
c: c,
c: &Config{},
logger: slog.New(slog.NewTextHandler(stdOut, nil)),
stats: &Stats{
StartTime: time.Now(),
@@ -82,6 +90,32 @@ func newScript(c *Config) (*script, error) {
return nil
})
envconfig.Lookup = func(key string) (string, bool) {
value, okValue := os.LookupEnv(key)
location, okFile := os.LookupEnv(key + "_FILE")
switch {
case okValue && !okFile: // only value
return value, true
case !okValue && okFile: // only file
contents, err := os.ReadFile(location)
if err != nil {
s.must(fmt.Errorf("newScript: failed to read %s! Error: %s", location, err))
return "", false
}
return string(contents), true
case okValue && okFile: // both
s.must(fmt.Errorf("newScript: both %s and %s are set!", key, key+"_FILE"))
return "", false
default: // neither, ignore
return "", false
}
}
if err := envconfig.Process("", s.c); err != nil {
return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
}
s.file = path.Join("/tmp", s.c.BackupFilename)
tmplFileName, tErr := template.New("extension").Parse(s.file)
@@ -285,6 +319,302 @@ func newScript(c *Config) (*script, error) {
return s, nil
}
type noopWriteCloser struct {
io.Writer
}
func (noopWriteCloser) Close() error {
return nil
}
type handledSwarmService struct {
serviceID string
initialReplicaCount uint64
}
// stopContainersAndServices stops all Docker containers that are marked to be
// stopped during the backup and returns a function that can be called to
// restart everything that has been stopped.
func (s *script) stopContainersAndServices() (func() error, error) {
if s.cli == nil {
return noop, nil
}
dockerInfo, err := s.cli.Info(context.Background())
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error getting docker info: %w", err)
}
isDockerSwarm := dockerInfo.Swarm.LocalNodeState != "inactive"
discardWriter := &noopWriteCloser{io.Discard}
filterMatchLabel := fmt.Sprintf(
"docker-volume-backup.stop-during-backup=%s",
s.c.BackupStopContainerLabel,
)
allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers: %w", err)
}
containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Filters: filters.NewArgs(filters.KeyValuePair{
Key: "label",
Value: filterMatchLabel,
}),
})
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers to stop: %w", err)
}
var allServices []swarm.Service
var servicesToScaleDown []handledSwarmService
if isDockerSwarm {
allServices, err = s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services: %w", err)
}
matchingServices, err := s.cli.ServiceList(context.Background(), types.ServiceListOptions{
Filters: filters.NewArgs(filters.KeyValuePair{
Key: "label",
Value: filterMatchLabel,
}),
Status: true,
})
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services to scale down: %w", err)
}
for _, s := range matchingServices {
servicesToScaleDown = append(servicesToScaleDown, handledSwarmService{
serviceID: s.ID,
initialReplicaCount: *s.Spec.Mode.Replicated.Replicas,
})
}
}
if len(containersToStop) == 0 && len(servicesToScaleDown) == 0 {
return noop, nil
}
if isDockerSwarm {
for _, container := range containersToStop {
if swarmServiceID, ok := container.Labels["com.docker.swarm.service.id"]; ok {
parentService, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, types.ServiceInspectOptions{})
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for parent service with ID %s: %w", swarmServiceID, err)
}
for label := range parentService.Spec.Labels {
if label == "docker-volume-backup.stop-during-backup" {
return noop, fmt.Errorf(
"(*script).stopContainersAndServices: container %s is labeled to stop but has parent service %s which is also labeled, cannot continue",
container.Names[0],
parentService.Spec.Name,
)
}
}
}
}
}
s.logger.Info(
fmt.Sprintf(
"Stopping %d out of %d running container(s) and scaling down %d out of %d active service(s) as they were labeled %s.",
len(containersToStop),
len(allContainers),
len(servicesToScaleDown),
len(allServices),
filterMatchLabel,
),
)
var stoppedContainers []types.Container
var stopErrors []error
for _, container := range containersToStop {
if err := s.cli.ContainerStop(context.Background(), container.ID, ctr.StopOptions{}); err != nil {
stopErrors = append(stopErrors, err)
} else {
stoppedContainers = append(stoppedContainers, container)
}
}
var scaledDownServices []swarm.Service
var scaleDownErrors []error
if isDockerSwarm {
for _, svc := range servicesToScaleDown {
service, _, err := s.cli.ServiceInspectWithRaw(context.Background(), svc.serviceID, types.ServiceInspectOptions{})
if err != nil {
scaleDownErrors = append(
scaleDownErrors,
fmt.Errorf("(*script).stopContainersAndServices: error inspecting service %s: %w", svc.serviceID, err),
)
continue
}
var zero uint64 = 0
serviceMode := &service.Spec.Mode
switch {
case serviceMode.Replicated != nil:
serviceMode.Replicated.Replicas = &zero
default:
scaleDownErrors = append(
scaleDownErrors,
fmt.Errorf("(*script).stopContainersAndServices: labeled service %s has to be in replicated mode", service.Spec.Name),
)
continue
}
response, err := s.cli.ServiceUpdate(context.Background(), service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{})
if err != nil {
scaleDownErrors = append(scaleDownErrors, err)
continue
}
for _, warning := range response.Warnings {
s.logger.Warn(
fmt.Sprintf("The Docker API returned a warning when scaling down service %s: %s", service.Spec.Name, warning),
)
}
if err := progress.ServiceProgress(context.Background(), s.cli, service.ID, discardWriter); err != nil {
scaleDownErrors = append(scaleDownErrors, err)
} else {
scaledDownServices = append(scaledDownServices, service)
}
// progress.ServiceProgress returns too early, so we need to manually check
// whether all containers belonging to the service have actually been removed
for {
containers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Filters: filters.NewArgs(filters.KeyValuePair{
Key: "label",
Value: fmt.Sprintf("com.docker.swarm.service.id=%s", service.ID),
}),
})
if err != nil {
scaleDownErrors = append(scaleDownErrors, err)
break
}
if len(containers) == 0 {
break
}
time.Sleep(time.Second)
}
}
}
s.stats.Containers = ContainersStats{
All: uint(len(allContainers)),
ToStop: uint(len(containersToStop)),
Stopped: uint(len(stoppedContainers)),
StopErrors: uint(len(stopErrors)),
}
s.stats.Services = ServicesStats{
All: uint(len(allServices)),
ToScaleDown: uint(len(servicesToScaleDown)),
ScaledDown: uint(len(scaledDownServices)),
ScaleDownErrors: uint(len(scaleDownErrors)),
}
var initialErr error
allErrors := append(stopErrors, scaleDownErrors...)
if len(allErrors) != 0 {
initialErr = fmt.Errorf(
"(*script).stopContainersAndServices: %d error(s) stopping containers: %w",
len(allErrors),
errors.Join(allErrors...),
)
}
return func() error {
servicesRequiringForceUpdate := map[string]struct{}{}
var restartErrors []error
for _, container := range stoppedContainers {
if swarmServiceName, ok := container.Labels["com.docker.swarm.service.name"]; ok {
servicesRequiringForceUpdate[swarmServiceName] = struct{}{}
continue
}
if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
restartErrors = append(restartErrors, err)
}
}
if len(servicesRequiringForceUpdate) != 0 {
services, _ := s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
for serviceName := range servicesRequiringForceUpdate {
var serviceMatch swarm.Service
for _, service := range services {
if service.Spec.Name == serviceName {
serviceMatch = service
break
}
}
if serviceMatch.ID == "" {
restartErrors = append(
restartErrors,
fmt.Errorf("(*script).stopContainersAndServices: couldn't find service with name %s", serviceName),
)
continue
}
serviceMatch.Spec.TaskTemplate.ForceUpdate += 1
if _, err := s.cli.ServiceUpdate(
context.Background(), serviceMatch.ID,
serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
); err != nil {
restartErrors = append(restartErrors, err)
}
}
}
var scaleUpErrors []error
if isDockerSwarm {
for _, svc := range servicesToScaleDown {
service, _, err := s.cli.ServiceInspectWithRaw(context.Background(), svc.serviceID, types.ServiceInspectOptions{})
if err != nil {
scaleUpErrors = append(scaleUpErrors, err)
continue
}
service.Spec.Mode.Replicated.Replicas = &svc.initialReplicaCount
response, err := s.cli.ServiceUpdate(
context.Background(),
service.ID,
service.Version, service.Spec,
types.ServiceUpdateOptions{},
)
if err != nil {
scaleUpErrors = append(scaleUpErrors, err)
continue
}
for _, warning := range response.Warnings {
s.logger.Warn(
fmt.Sprintf("The Docker API returned a warning when scaling up service %s: %s", service.Spec.Name, warning),
)
}
if err := progress.ServiceProgress(context.Background(), s.cli, service.ID, discardWriter); err != nil {
scaleUpErrors = append(scaleUpErrors, err)
}
}
}
allErrors := append(restartErrors, scaleUpErrors...)
if len(allErrors) != 0 {
return fmt.Errorf(
"stopContainers: %d error(s) restarting containers and services: %w",
len(allErrors),
errors.Join(allErrors...),
)
}
s.logger.Info(
fmt.Sprintf(
"Restarted %d container(s) and %d service(s).",
len(stoppedContainers),
len(scaledDownServices),
),
)
return nil
}, initialErr
}
// createArchive creates a tar archive of the configured backup location and
// saves it to disk.
func (s *script) createArchive() error {
@@ -295,7 +625,7 @@ func (s *script) createArchive() error {
"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the documentation for an upgrade guide.",
"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the README for an upgrade guide.",
)
backupSources = filepath.Join("/tmp", s.c.BackupSources)
// copy before compressing to guard against a situation where the backup folder's contents are still growing.
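Stepping back from the diff: the contract of `stopContainersAndServices` is that callers always receive a restart closure, even on partial failure, so the deferred restart in `runScript` can run unconditionally. A stripped-down sketch of that pattern, with all Docker specifics replaced by prints:

```go
package main

import "fmt"

// stopWorkload mimics the stop/restart contract: a restart closure is
// always returned, even when stopping fails.
func stopWorkload(name string) (func() error, error) {
	fmt.Println("stopping", name)
	return func() error {
		fmt.Println("restarting", name)
		return nil
	}, nil
}

func main() {
	restart, err := stopWorkload("database")
	// Restart as soon as possible, i.e. before any further processing,
	// mirroring the deferred restart in runScript.
	defer func() {
		if restartErr := restart(); restartErr != nil {
			fmt.Println("restart failed:", restartErr)
		}
	}()
	if err != nil {
		fmt.Println("stop failed:", err)
		return
	}
	fmt.Println("taking backup while the workload is stopped")
}
```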


@@ -1,338 +0,0 @@
package main
import (
"context"
"errors"
"fmt"
"io"
"os"
"sync"
"time"
"github.com/docker/cli/cli/command/service/progress"
"github.com/docker/docker/api/types"
ctr "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
)
func scaleService(cli *client.Client, serviceID string, replicas uint64) ([]string, error) {
service, _, err := cli.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
if err != nil {
return nil, fmt.Errorf("scaleService: error inspecting service %s: %w", serviceID, err)
}
serviceMode := &service.Spec.Mode
switch {
case serviceMode.Replicated != nil:
serviceMode.Replicated.Replicas = &replicas
default:
return nil, fmt.Errorf("scaleService: service to be scaled %s has to be in replicated mode", service.Spec.Name)
}
response, err := cli.ServiceUpdate(context.Background(), service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{})
if err != nil {
return nil, fmt.Errorf("scaleService: error updating service: %w", err)
}
discardWriter := &noopWriteCloser{io.Discard}
if err := progress.ServiceProgress(context.Background(), cli, service.ID, discardWriter); err != nil {
return nil, err
}
return response.Warnings, nil
}
func awaitContainerCountForService(cli *client.Client, serviceID string, count int, timeoutAfter time.Duration) error {
poll := time.NewTicker(time.Second)
timeout := time.NewTimer(timeoutAfter)
defer timeout.Stop()
defer poll.Stop()
for {
select {
case <-timeout.C:
return fmt.Errorf(
"awaitContainerCount: timed out after waiting %s for service %s to reach desired container count of %d",
timeoutAfter,
serviceID,
count,
)
case <-poll.C:
containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
Filters: filters.NewArgs(filters.KeyValuePair{
Key: "label",
Value: fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID),
}),
})
if err != nil {
return fmt.Errorf("awaitContainerCount: error listing containers: %w", err)
}
if len(containers) == count {
return nil
}
}
}
}
// stopContainersAndServices stops all Docker containers that are marked to be
// stopped during the backup and returns a function that can be called to
// restart everything that has been stopped.
func (s *script) stopContainersAndServices() (func() error, error) {
if s.cli == nil {
return noop, nil
}
dockerInfo, err := s.cli.Info(context.Background())
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error getting docker info: %w", err)
}
isDockerSwarm := dockerInfo.Swarm.LocalNodeState != "inactive"
labelValue := s.c.BackupStopDuringBackupLabel
if s.c.BackupStopContainerLabel != "" {
s.logger.Warn(
"Using BACKUP_STOP_CONTAINER_LABEL has been deprecated and will be removed in the next major version.",
)
s.logger.Warn(
"Please use BACKUP_STOP_DURING_BACKUP_LABEL instead. Refer to the docs for an upgrade guide.",
)
if _, ok := os.LookupEnv("BACKUP_STOP_DURING_BACKUP_LABEL"); ok {
return noop, errors.New("(*script).stopContainersAndServices: both BACKUP_STOP_DURING_BACKUP_LABEL and BACKUP_STOP_CONTAINER_LABEL have been set, cannot continue")
}
labelValue = s.c.BackupStopContainerLabel
}
filterMatchLabel := fmt.Sprintf(
"docker-volume-backup.stop-during-backup=%s",
labelValue,
)
allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers: %w", err)
}
containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
Filters: filters.NewArgs(filters.KeyValuePair{
Key: "label",
Value: filterMatchLabel,
}),
})
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers to stop: %w", err)
}
var allServices []swarm.Service
var servicesToScaleDown []handledSwarmService
if isDockerSwarm {
allServices, err = s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services: %w", err)
}
matchingServices, err := s.cli.ServiceList(context.Background(), types.ServiceListOptions{
Filters: filters.NewArgs(filters.KeyValuePair{
Key: "label",
Value: filterMatchLabel,
}),
Status: true,
})
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services to scale down: %w", err)
}
for _, s := range matchingServices {
servicesToScaleDown = append(servicesToScaleDown, handledSwarmService{
serviceID: s.ID,
initialReplicaCount: *s.Spec.Mode.Replicated.Replicas,
})
}
}
if len(containersToStop) == 0 && len(servicesToScaleDown) == 0 {
return noop, nil
}
if isDockerSwarm {
for _, container := range containersToStop {
if swarmServiceID, ok := container.Labels["com.docker.swarm.service.id"]; ok {
parentService, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, types.ServiceInspectOptions{})
if err != nil {
return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for parent service with ID %s: %w", swarmServiceID, err)
}
for label := range parentService.Spec.Labels {
if label == "docker-volume-backup.stop-during-backup" {
return noop, fmt.Errorf(
"(*script).stopContainersAndServices: container %s is labeled to stop but has parent service %s which is also labeled, cannot continue",
container.Names[0],
parentService.Spec.Name,
)
}
}
}
}
}
s.logger.Info(
fmt.Sprintf(
"Stopping %d out of %d running container(s) as they were labeled %s.",
len(containersToStop),
len(allContainers),
filterMatchLabel,
),
)
if isDockerSwarm {
s.logger.Info(
fmt.Sprintf(
"Scaling down %d out of %d active service(s) as they were labeled %s.",
len(servicesToScaleDown),
len(allServices),
filterMatchLabel,
),
)
}
var stoppedContainers []types.Container
var stopErrors []error
for _, container := range containersToStop {
if err := s.cli.ContainerStop(context.Background(), container.ID, ctr.StopOptions{}); err != nil {
stopErrors = append(stopErrors, err)
} else {
stoppedContainers = append(stoppedContainers, container)
}
}
// appends to scaledDownServices happen from multiple goroutines below,
// so it uses the same concurrency-safe slice as the error collection
var scaledDownServices concurrentSlice[handledSwarmService]
var scaleDownErrors concurrentSlice[error]
if isDockerSwarm {
wg := sync.WaitGroup{}
for _, svc := range servicesToScaleDown {
wg.Add(1)
go func(svc handledSwarmService) {
defer wg.Done()
warnings, err := scaleService(s.cli, svc.serviceID, 0)
if err != nil {
scaleDownErrors.append(err)
return
}
scaledDownServices.append(svc)
for _, warning := range warnings {
s.logger.Warn(
fmt.Sprintf("The Docker API returned a warning when scaling down service %s: %s", svc.serviceID, warning),
)
}
// progress.ServiceProgress returns too early, so we need to manually check
// whether all containers belonging to the service have actually been removed
if err := awaitContainerCountForService(s.cli, svc.serviceID, 0, s.c.BackupStopServiceTimeout); err != nil {
scaleDownErrors.append(err)
}
}(svc)
}
wg.Wait()
}
s.stats.Containers = ContainersStats{
All: uint(len(allContainers)),
ToStop: uint(len(containersToStop)),
Stopped: uint(len(stoppedContainers)),
StopErrors: uint(len(stopErrors)),
}
s.stats.Services = ServicesStats{
All: uint(len(allServices)),
ToScaleDown: uint(len(servicesToScaleDown)),
ScaledDown: uint(len(scaledDownServices.value())),
ScaleDownErrors: uint(len(scaleDownErrors.value())),
}
var initialErr error
allErrors := append(stopErrors, scaleDownErrors.value()...)
if len(allErrors) != 0 {
initialErr = fmt.Errorf(
"(*script).stopContainersAndServices: %d error(s) stopping containers: %w",
len(allErrors),
errors.Join(allErrors...),
)
}
return func() error {
var restartErrors []error
matchedServices := map[string]bool{}
for _, container := range stoppedContainers {
if swarmServiceID, ok := container.Labels["com.docker.swarm.service.id"]; ok && isDockerSwarm {
if _, ok := matchedServices[swarmServiceID]; ok {
continue
}
matchedServices[swarmServiceID] = true
// in case a container was part of a swarm service, the service needs to
// be force updated instead of restarting the container, as it would
// otherwise remain in a "completed" state
service, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, types.ServiceInspectOptions{})
if err != nil {
restartErrors = append(
restartErrors,
fmt.Errorf("(*script).stopContainersAndServices: error looking up parent service: %w", err),
)
continue
}
service.Spec.TaskTemplate.ForceUpdate += 1
if _, err := s.cli.ServiceUpdate(
context.Background(), service.ID,
service.Version, service.Spec, types.ServiceUpdateOptions{},
); err != nil {
restartErrors = append(restartErrors, err)
}
continue
}
if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
restartErrors = append(restartErrors, err)
}
}
var scaleUpErrors concurrentSlice[error]
if isDockerSwarm {
wg := &sync.WaitGroup{}
for _, svc := range servicesToScaleDown {
wg.Add(1)
go func(svc handledSwarmService) {
defer wg.Done()
warnings, err := scaleService(s.cli, svc.serviceID, svc.initialReplicaCount)
if err != nil {
scaleUpErrors.append(err)
return
}
for _, warning := range warnings {
s.logger.Warn(
fmt.Sprintf("The Docker API returned a warning when scaling up service %s: %s", svc.serviceID, warning),
)
}
}(svc)
}
wg.Wait()
}
allErrors := append(restartErrors, scaleUpErrors.value()...)
if len(allErrors) != 0 {
return fmt.Errorf(
"(*script).stopContainersAndServices: %d error(s) restarting containers and services: %w",
len(allErrors),
errors.Join(allErrors...),
)
}
s.logger.Info(
fmt.Sprintf(
"Restarted %d container(s).",
len(stoppedContainers),
),
)
if isDockerSwarm {
s.logger.Info(
fmt.Sprintf(
"Scaled %d service(s) back up.",
len(scaledDownServices.value()),
),
)
}
return nil
}, initialErr
}


@@ -8,7 +8,6 @@ import (
"fmt"
"io"
"os"
"sync"
)
var noop = func() error { return nil }
@@ -51,31 +50,3 @@ func (b *bufferingWriter) Write(p []byte) (n int, err error) {
}
return b.writer.Write(p)
}
type noopWriteCloser struct {
io.Writer
}
func (noopWriteCloser) Close() error {
return nil
}
type handledSwarmService struct {
serviceID string
initialReplicaCount uint64
}
type concurrentSlice[T any] struct {
val []T
sync.Mutex
}
func (c *concurrentSlice[T]) append(v T) {
c.Lock()
defer c.Unlock()
c.val = append(c.val, v)
}
func (c *concurrentSlice[T]) value() []T {
return c.val
}
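For illustration, this is the usage pattern `concurrentSlice` supports: appends race across goroutines and are mutex-guarded, while `value()` takes no lock and is therefore only read after `wg.Wait()` has established the necessary happens-before ordering. A minimal sketch under those assumptions:

```go
// collectErrors fans out jobs and gathers failures the same way the
// scale-down code above collects errors from its goroutines.
func collectErrors(jobs []func() error) []error {
	var errs concurrentSlice[error]
	var wg sync.WaitGroup
	for _, job := range jobs {
		wg.Add(1)
		go func(job func() error) {
			defer wg.Done()
			if err := job(); err != nil {
				errs.append(err)
			}
		}(job)
	}
	wg.Wait() // only call value() after all appends are done
	return errs.value()
}
```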


@@ -1,19 +0,0 @@
---
title: Replace deprecated BACKUP_STOP_CONTAINER_LABEL setting
layout: default
parent: How Tos
nav_order: 19
---
# Replace deprecated `BACKUP_STOP_CONTAINER_LABEL` setting
Version `v2.36.0` deprecated the `BACKUP_STOP_CONTAINER_LABEL` setting and renamed it to `BACKUP_STOP_DURING_BACKUP_LABEL`, signaling that it stops both containers _and_ services.
Migrating is done by renaming the key for your custom value:
```diff
env:
- BACKUP_STOP_CONTAINER_LABEL: database
+ BACKUP_STOP_DURING_BACKUP_LABEL: database
```
The old key remains supported until the next major version, but a warning is logged each time a backup is taken.


@@ -76,7 +76,7 @@ Configuration, data about the backup run and helper functions will be passed to
Here is a list of all data passed to the template:
* `Config`: this object holds the configuration that has been passed to the script. The field names are the names of the recognized environment variables converted to PascalCase. (e.g. `BACKUP_STOP_DURING_BACKUP_LABEL` becomes `BackupStopDuringBackupLabel`)
* `Config`: this object holds the configuration that has been passed to the script. The field names are the names of the recognized environment variables converted to PascalCase. (e.g. `BACKUP_STOP_CONTAINER_LABEL` becomes `BackupStopContainerLabel`)
* `Error`: the error that made the backup fail. Only available in the `title_failure` and `body_failure` templates
* `Stats`: object that holds stats regarding script execution. In case of an unsuccessful run, some information may not be available.
* `StartTime`: time when the script started execution
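For illustration, these templates are standard Go `text/template` files evaluated against the data above. A hypothetical sketch of rendering a failure title; `notificationData` is a stand-in for the real struct and the values are made up:

```go
package main

import (
	"errors"
	"os"
	"text/template"
	"time"
)

// notificationData mirrors (a subset of) the fields documented above.
type notificationData struct {
	Error error
	Stats struct{ StartTime time.Time }
}

func main() {
	tmpl := template.Must(template.New("title_failure").Parse(
		"Backup failed at {{ .Stats.StartTime.Format \"15:04\" }}: {{ .Error }}",
	))
	data := notificationData{Error: errors.New("disk full")}
	data.Stats.StartTime = time.Now()
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```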


@@ -14,7 +14,7 @@ In many cases, it will be desirable to stop the services that are consuming the
This image can automatically stop and restart containers and services.
By default, any container that is labeled `docker-volume-backup.stop-during-backup=true` will be stopped before the backup is taken and restarted once it has finished.
In case you need more fine-grained control over which containers should be stopped (e.g. when backing up multiple volumes on different schedules), you can set the `BACKUP_STOP_DURING_BACKUP_LABEL` environment variable and then use the same value for labeling:
In case you need more fine-grained control over which containers should be stopped (e.g. when backing up multiple volumes on different schedules), you can set the `BACKUP_STOP_CONTAINER_LABEL` environment variable and then use the same value for labeling:
```yml
version: '3'
@@ -28,7 +28,7 @@ services:
backup:
image: offen/docker-volume-backup:v2
environment:
BACKUP_STOP_DURING_BACKUP_LABEL: service1
BACKUP_STOP_CONTAINER_LABEL: service1
volumes:
- data:/backup/my-app-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro


@@ -13,33 +13,5 @@ If you are interfacing with Docker via TCP, set `DOCKER_HOST` to the correct URL
DOCKER_HOST=tcp://docker_socket_proxy:2375
```
If you do this in order to restrict access to the Docker socket, note that this tool potentially calls the following Docker APIs:
In case you are using a socket proxy, it must support `GET` and `POST` requests to the `/containers` endpoint. If you are using Docker Swarm, it must also support the `/services` endpoint. If you are using pre/post backup commands, it must also support the `/exec` endpoint.
| API | When |
|-|-|
| `Info` | always |
| `ContainerExecCreate` | running commands from `exec-labels` |
| `ContainerExecAttach` | running commands from `exec-labels` |
| `ContainerExecInspect` | running commands from `exec-labels` |
| `ContainerList` | always |
| `ServiceList` | Docker engine is running in Swarm mode |
| `ServiceInspect` | Docker engine is running in Swarm mode |
| `ServiceUpdate` | Docker engine is running in Swarm mode and `stop-during-backup` is used |
| `ContainerStop` | `stop-during-backup` labels are applied to containers |
| `ContainerStart` | `stop-during-backup` labels are applied to containers |
---
In case you are using [`docker-socket-proxy`][proxy], this means the following permissions are required:
| Permission | When |
|-|-|
| INFO | always required |
| CONTAINERS | always required |
| POST | required when using `stop-during-backup` or `exec` labels |
| EXEC | required when using `exec`-labeled commands |
| SERVICES | required when Docker Engine is running in Swarm mode |
| NODES | required when labeling services `stop-during-backup` |
| TASKS | required when labeling services `stop-during-backup` |
[proxy]: https://github.com/Tecnativa/docker-socket-proxy
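For a sense of what these API calls look like from the tool's side, here is a minimal sketch using the `github.com/docker/docker/client` package that appears in the diffs above; `Info` is the call listed as always required, so a proxy without the `INFO` permission would fail here first:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// client.FromEnv honors DOCKER_HOST, e.g. tcp://docker_socket_proxy:2375
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err) // e.g. the proxy denied the request
	}
	fmt.Println("swarm state:", info.Swarm.LocalNodeState)
}
```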


@@ -352,7 +352,7 @@ services:
AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
# Label the container using the `data_1` volume as `docker-volume-backup.stop-during-backup=service1`
BACKUP_STOP_DURING_BACKUP_LABEL: service1
BACKUP_STOP_CONTAINER_LABEL: service1
volumes:
- data_1:/backup/data-1-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
@@ -362,7 +362,7 @@ services:
<<: *backup_environment
# Label the container using the `data_2` volume as `docker-volume-backup.stop-during-backup=service2`
BACKUP_CRON_EXPRESSION: "0 3 * * *"
BACKUP_STOP_DURING_BACKUP_LABEL: service2
BACKUP_STOP_CONTAINER_LABEL: service2
volumes:
- data_2:/backup/data-2-backup:ro
- /var/run/docker.sock:/var/run/docker.sock:ro


@@ -23,22 +23,9 @@ You can populate below template according to your requirements and use it as you
```
########### BACKUP SCHEDULE
# A cron expression represents a set of times, using 5 or 6 space-separated fields.
#
# Field name | Mandatory? | Allowed values | Allowed special characters
# ---------- | ---------- | -------------- | --------------------------
# Seconds | No | 0-59 | * / , -
# Minutes | Yes | 0-59 | * / , -
# Hours | Yes | 0-23 | * / , -
# Day of month | Yes | 1-31 | * / , - ?
# Month | Yes | 1-12 or JAN-DEC | * / , -
# Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
#
# Month and Day-of-week field values are case insensitive.
# "SUN", "Sun", and "sun" are equally accepted.
# If no value is set, `@daily` will be used.
# If you do not want the cron to ever run, use `0 0 5 31 2 ?`.
# Backups run on the given cron schedule in `busybox` flavor. If no
# value is set, `@daily` will be used. If you do not want the cron
# to ever run, use `0 0 5 31 2 ?`.
# BACKUP_CRON_EXPRESSION="0 2 * * *"
@@ -329,22 +316,15 @@ You can populate below template according to your requirements and use it as you
# GPG_PASSPHRASE="<xxx>"
########### STOPPING CONTAINERS AND SERVICES DURING BACKUP
########### STOPPING CONTAINERS DURING BACKUP
# Containers or services can be stopped by applying a
# `docker-volume-backup.stop-during-backup` label. By default, all containers and
# services that are labeled with `true` will be stopped. If you need more fine
# grained control (e.g. when running multiple containers based on this image),
# you can override this default by specifying a different value here.
# BACKUP_STOP_DURING_BACKUP_LABEL="service1"
# Containers can be stopped by applying a
# `docker-volume-backup.stop-during-backup` label. By default, all containers
# that are labeled with `true` will be stopped. If you need more fine grained
# control (e.g. when running multiple containers based on this image), you can
# override this default by specifying a different value here.
# When trying to scale down Docker Swarm services, give up after
# the specified amount of time in case the service has not converged yet.
# In case you need to adjust this timeout, supply a duration
# value as per https://pkg.go.dev/time#ParseDuration to `BACKUP_STOP_SERVICE_TIMEOUT`.
# Defaults to 5 minutes.
# BACKUP_STOP_SERVICE_TIMEOUT="5m"
# BACKUP_STOP_CONTAINER_LABEL="service1"
########### EXECUTING COMMANDS IN CONTAINERS PRE/POST BACKUP

go.mod

@@ -10,7 +10,7 @@ require (
github.com/docker/cli v24.0.1+incompatible
github.com/docker/docker v24.0.7+incompatible
github.com/gofrs/flock v0.8.1
github.com/klauspost/compress v1.17.6
github.com/klauspost/compress v1.17.4
github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
github.com/minio/minio-go/v7 v7.0.66
github.com/offen/envconfig v1.5.0
@@ -28,8 +28,6 @@ require (
github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
github.com/joho/godotenv v1.5.1 // indirect
github.com/robfig/cron/v3 v3.0.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.31.0 // indirect
)

go.sum

@@ -443,8 +443,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jarcoal/httpmock v1.2.0 h1:gSvTxxFR/MEMfsGrvRbdfpRUMBStovlSRLw0Ep1bwwc=
github.com/jarcoal/httpmock v1.2.0/go.mod h1:oCoTsnAz4+UoOUIf5lJOWV2QQIW5UoeUI6aM2YnWAZk=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -458,8 +456,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -595,8 +593,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/robfig/cron/v3 v3.0.0 h1:kQ6Cb7aHOHTSzNVNEhmp8EcWKLb4CbiMW9h9VyIhO4E=
github.com/robfig/cron/v3 v3.0.0/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=


@@ -1,23 +0,0 @@
version: '3'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
restart: always
environment:
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: '7'
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ${LOCAL_DIR:-./local}:/archive
offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen
volumes:
app_data:


@@ -1,34 +0,0 @@
#!/bin/sh
set -e
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
export LOCAL_DIR=$(mktemp -d)
docker compose up -d --quiet-pull
sleep 5
ec=0
docker compose exec -e BACKUP_RETENTION_DAYS=7 -e BACKUP_FILENAME=test.tar.gz backup backup & \
{ set +e; sleep 0.1; docker compose exec -e BACKUP_FILENAME=test2.tar.gz -e LOCK_TIMEOUT=1s backup backup; ec=$?;}
if [ "$ec" = "0" ]; then
fail "Subsequent invocation exited 0"
fi
pass "Subsequent invocation did not exit 0"
sleep 5
if [ ! -f "${LOCAL_DIR}/test.tar.gz" ]; then
fail "Could not find expected tar file"
fi
pass "Found expected tar file"
if [ -f "${LOCAL_DIR}/test2.tar.gz" ]; then
fail "Subsequent invocation was expected to fail but created archive"
fi
pass "Subsequent invocation did not create archive"


@@ -1,40 +0,0 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense
version: '3.8'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
DOCKER_HOST: tcp://docker_socket_proxy:2375
volumes:
- pg_data:/backup/pg_data:ro
- ${LOCAL_DIR:-local}:/archive
docker_socket_proxy:
image: tecnativa/docker-socket-proxy:0.1
environment:
INFO: ${ALLOW_INFO:-1}
CONTAINERS: ${ALLOW_CONTAINERS:-1}
SERVICES: ${ALLOW_SERVICES:-1}
POST: ${ALLOW_POST:-1}
TASKS: ${ALLOW_TASKS:-1}
NODES: ${ALLOW_NODES:-1}
volumes:
- /var/run/docker.sock:/var/run/docker.sock
pg:
image: postgres:14-alpine
environment:
POSTGRES_PASSWORD: example
volumes:
- pg_data:/var/lib/postgresql/data
deploy:
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
pg_data:


@@ -1,36 +0,0 @@
# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: Unlicense
version: '3.8'
services:
backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
environment:
BACKUP_FILENAME: test.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
DOCKER_HOST: tcp://docker_socket_proxy:2375
volumes:
- pg_data:/backup/pg_data:ro
- ${LOCAL_DIR:-local}:/archive
docker_socket_proxy:
image: tecnativa/docker-socket-proxy:0.1
environment:
INFO: ${ALLOW_INFO:-1}
CONTAINERS: ${ALLOW_CONTAINERS:-1}
POST: ${ALLOW_POST:-1}
volumes:
- /var/run/docker.sock:/var/run/docker.sock
pg:
image: postgres:14-alpine
environment:
POSTGRES_PASSWORD: example
volumes:
- pg_data:/var/lib/postgresql/data
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
pg_data:


@@ -1,76 +0,0 @@
#!/bin/sh
set -e
cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))
export LOCAL_DIR=$(mktemp -d)
docker compose up -d --quiet-pull
sleep 5
# The default configuration in docker-compose.yml should
# successfully create a backup.
docker compose exec backup backup
sleep 5
expect_running_containers "3"
if [ ! -f "$LOCAL_DIR/test.tar.gz" ]; then
fail "Archive was not created"
fi
pass "Found relevant archive file."
# Disabling POST should make the backup run fail
ALLOW_POST="0" docker compose up -d
sleep 5
set +e
docker compose exec backup backup
if [ $? = "0" ]; then
fail "Expected invocation to exit non-zero."
fi
set -e
pass "Invocation exited non-zero."
docker compose down --volumes
# Next, the test is run against a Swarm setup
docker swarm init
export LOCAL_DIR=$(mktemp -d)
docker stack deploy --compose-file=docker-compose.swarm.yml test_stack
sleep 20
# The default configuration in docker-compose.swarm.yml should
# successfully create a backup in Swarm mode.
docker exec $(docker ps -q -f name=backup) backup
if [ ! -f "$LOCAL_DIR/test.tar.gz" ]; then
fail "Archive was not created"
fi
pass "Found relevant archive file."
sleep 5
expect_running_containers "3"
# Disabling POST should make the backup run fail
ALLOW_POST="0" docker stack deploy --compose-file=docker-compose.swarm.yml test_stack
sleep 20
set +e
docker exec $(docker ps -q -f name=backup) backup
if [ $? = "0" ]; then
fail "Expected invocation to exit non-zero."
fi
set -e
pass "Invocation exited non-zero."