Compare commits


18 Commits

Author         SHA1        Message                                                           Date
Frederik Ring  7a75b725dc  shorten log messages                                              2021-08-23 07:07:44 +02:00
Frederik Ring  8c46bd54aa  make sure backup also runs when socket isn't present              2021-08-22 22:44:41 +02:00
Frederik Ring  edefe69e6f  read all configuration in init                                    2021-08-22 22:07:22 +02:00
Frederik Ring  01d1a60bb7  use go native strftime version                                    2021-08-22 21:14:28 +02:00
Frederik Ring  f15379795f  fix location of success message for having created local backup  2021-08-22 20:49:46 +02:00
Frederik Ring  053422ba0e  only tag proper releases as latest                                2021-08-22 20:16:24 +02:00
Frederik Ring  39b933b8e8  improve logging messages                                          2021-08-22 20:03:18 +02:00
Frederik Ring  8161ad7f8f  add insecure option, update docs                                  2021-08-22 19:26:34 +02:00
Frederik Ring  34e01fa303  adapt repo layout to go                                           2021-08-22 18:16:40 +02:00
Frederik Ring  2554c538ea  add logging                                                       2021-08-22 18:00:10 +02:00
Frederik Ring  77f948d4da  refactor deferred cleanup actions to always run                   2021-08-22 16:19:41 +02:00
Frederik Ring  d388785222  implement pruning from remote storage                             2021-08-22 15:17:23 +02:00
Frederik Ring  b46c402b19  add gpg encryption                                                2021-08-22 14:44:33 +02:00
Frederik Ring  29f8a078bc  implement deletion of local backups                               2021-08-22 14:19:25 +02:00
Frederik Ring  3f7c08d616  implement lock file to ensure backup runs mutually exclusive      2021-08-22 12:02:22 +02:00
Frederik Ring  8b7d27740a  implement copy to remote storage                                  2021-08-21 21:38:26 +02:00
Frederik Ring  9ddddc139a  scaffold script flow                                              2021-08-21 19:55:22 +02:00
Frederik Ring  d34b525804  try porting docker related parts to golang                        2021-08-21 19:05:49 +02:00
6 changed files with 59 additions and 65 deletions


@@ -11,10 +11,6 @@ jobs:
           name: Build
           command: |
             docker build . -t offen/docker-volume-backup:canary
-      - run:
-          name: Install gnupg
-          command: |
-            sudo apt-get install -y gnupg
       - run:
           name: Run tests
           working_directory: ~/docker-volume-backup/test


@@ -2,7 +2,7 @@
 
 Backup Docker volumes locally or to any S3 compatible storage.
 
-The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to a local directory or any S3 compatible storage (or both), and rotates away old backups if configured.
+The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a sidecar container to an existing Docker setup. It handles recurring backups of Docker volumes to a local directory or any S3 compatible storage (or both) and rotates away old backups if configured.
 
 ## Configuration
 
@@ -177,8 +177,8 @@ docker exec <container_ref> backup
 This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
 
 - The original image is based on `ubuntu` and additional tools, making it very heavy. This version is roughly 1/25 in compressed size (it's ~12MB).
-- The original image uses a shell script, when this is written in Go, which makes it easier to extend and maintain (more verbose also).
+- The original image uses a shell script, when this is written in Go.
 - The original image proposed to handle backup rotation through AWS S3 lifecycle policies. This image adds the option to rotate away old backups through the same command so this functionality can also be offered for non-AWS storage backends like MinIO. Local copies of backups can also be pruned once they reach a certain age.
-- InfluxDB specific functionality from the original image was removed.
+- InfluxDB specific functionality was removed.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.


@@ -4,7 +4,6 @@
 package main
 
 import (
-	"bufio"
 	"bytes"
 	"context"
 	"errors"
@@ -44,8 +43,6 @@ func main() {
 	s.logger.Info("Finished running backup tasks.")
 }
 
-// script holds all the stateful information required to orchestrate a
-// single backup run.
 type script struct {
 	ctx context.Context
 	cli *client.Client
@@ -65,10 +62,27 @@ type script struct {
 	pruningPrefix string
 }
 
+// lock opens a lockfile at the given location, keeping it locked until the
+// caller invokes the returned release func. When invoked while the file is
+// still locked the function panics.
+func lock(lockfile string) func() error {
+	lf, err := os.OpenFile(lockfile, os.O_CREATE, os.ModeAppend)
+	if err != nil {
+		panic(err)
+	}
+	return func() error {
+		if err := lf.Close(); err != nil {
+			return fmt.Errorf("lock: error releasing file lock: %w", err)
+		}
+		if err := os.Remove(lockfile); err != nil {
+			return fmt.Errorf("lock: error removing lock file: %w", err)
+		}
+		return nil
+	}
+}
+
 // init creates all resources needed for the script to perform actions against
-// remote resources like the Docker engine or remote storage locations. All
-// reading from env vars or other configuration sources is expected to happen
-// in this method.
+// remote resources like the Docker engine or remote storage locations.
 func (s *script) init() error {
 	s.ctx = context.Background()
 	s.logger = logrus.New()
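A note on the lock function in this hunk: neither variant in this diff actually enforces exclusivity, since os.OpenFile with os.O_CREATE (with or without O_RDWR) succeeds even when the file already exists, so a second run would not panic as the doc comment promises. A minimal sketch of create-or-fail semantics using os.O_EXCL (illustrative only, not part of this changeset; exclusiveLock is a hypothetical name):

    import (
    	"fmt"
    	"os"
    )

    // exclusiveLock creates the lockfile with O_EXCL, so a concurrent run
    // that finds the file already present fails fast instead of silently
    // sharing the lock. The returned func releases and removes the lock.
    func exclusiveLock(lockfile string) (func() error, error) {
    	lf, err := os.OpenFile(lockfile, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0o644)
    	if err != nil {
    		return nil, fmt.Errorf("exclusiveLock: %s appears to be held: %w", lockfile, err)
    	}
    	return func() error {
    		if err := lf.Close(); err != nil {
    			return fmt.Errorf("exclusiveLock: error releasing lock: %w", err)
    		}
    		return os.Remove(lockfile)
    	}, nil
    }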
@@ -164,25 +178,22 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 	if err != nil {
 		return fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
 	}
 
-	if len(containersToStop) == 0 {
-		return thunk()
-	}
-
 	s.logger.Infof(
-		"Stopping %d container(s) labeled `%s` out of %d running container(s).",
+		"Stopping %d containers labeled `%s` out of %d running container(s).",
 		len(containersToStop),
 		containerLabel,
 		len(allContainers),
 	)
 
 	var stoppedContainers []types.Container
-	var stopErrors []error
-	for _, container := range containersToStop {
-		if err := s.cli.ContainerStop(s.ctx, container.ID, nil); err != nil {
-			stopErrors = append(stopErrors, err)
-		} else {
-			stoppedContainers = append(stoppedContainers, container)
-		}
-	}
+	var errors []error
+	if len(containersToStop) != 0 {
+		for _, container := range containersToStop {
+			if err := s.cli.ContainerStop(s.ctx, container.ID, nil); err != nil {
+				errors = append(errors, err)
+			} else {
+				stoppedContainers = append(stoppedContainers, container)
+			}
+		}
+	}
@@ -235,13 +246,17 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 		return nil
 	}()
 
-	if len(stopErrors) != 0 {
-		return fmt.Errorf(
-			"stopContainersAndRun: %d error(s) stopping containers: %w",
-			len(stopErrors),
+	var stopErr error
+	if len(errors) != 0 {
+		stopErr = fmt.Errorf(
+			"stopContainersAndRun: %d errors stopping containers: %w",
+			len(errors),
 			err,
 		)
 	}
+	if stopErr != nil {
+		return stopErr
+	}
 
 	return thunk()
 }
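Both sides of this hunk collect stop failures into a slice but only surface how many occurred. If the individual messages should survive the flattening, a helper along these lines would do (joinErrors is a hypothetical name, not part of this diff):

    import (
    	"fmt"
    	"strings"
    )

    // joinErrors flattens a slice of errors into one error that preserves
    // every individual message; it returns nil for an empty slice.
    func joinErrors(errs []error) error {
    	if len(errs) == 0 {
    		return nil
    	}
    	msgs := make([]string, 0, len(errs))
    	for _, err := range errs {
    		msgs = append(msgs, err.Error())
    	}
    	return fmt.Errorf("%d error(s) stopping containers: %s", len(errs), strings.Join(msgs, ", "))
    }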
@@ -265,10 +280,9 @@ func (s *script) encryptBackup() error {
 		return nil
 	}
 
-	output := bytes.NewBuffer(nil)
+	buf := bytes.NewBuffer(nil)
 	_, name := path.Split(s.file)
-
-	pt, err := openpgp.SymmetricallyEncrypt(output, []byte(s.passphrase), &openpgp.FileHints{
+	pt, err := openpgp.SymmetricallyEncrypt(buf, []byte(s.passphrase), &openpgp.FileHints{
 		IsBinary: true,
 		FileName: name,
 	}, nil)
@@ -276,16 +290,20 @@ func (s *script) encryptBackup() error {
 		return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
 	}
 
-	file, err := os.Open(s.file)
+	unencrypted, err := ioutil.ReadFile(s.file)
 	if err != nil {
-		return fmt.Errorf("encryptBackup: error opening backup file %s: %w", s.file, err)
+		pt.Close()
+		return fmt.Errorf("encryptBackup: error reading unencrypted backup file: %w", err)
 	}
 
-	fileReader := bufio.NewReader(file)
-	fileReader.WriteTo(pt)
+	_, err = pt.Write(unencrypted)
+	if err != nil {
+		pt.Close()
+		return fmt.Errorf("encryptBackup: error writing backup contents: %w", err)
+	}
 	pt.Close()
 
 	gpgFile := fmt.Sprintf("%s.gpg", s.file)
-	if err := ioutil.WriteFile(gpgFile, output.Bytes(), os.ModeAppend); err != nil {
+	if err := ioutil.WriteFile(gpgFile, buf.Bytes(), os.ModeAppend); err != nil {
 		return fmt.Errorf("encryptBackup: error writing encrypted version of backup: %w", err)
 	}
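The version shown as added here buffers the whole archive twice: ioutil.ReadFile pulls the plaintext into memory, and the ciphertext accumulates in buf before being flushed with ioutil.WriteFile. For large backups, streaming keeps memory usage flat. A sketch, assuming golang.org/x/crypto/openpgp, the package that exports the SymmetricallyEncrypt used above (encryptFile is a hypothetical name):

    import (
    	"fmt"
    	"io"
    	"os"

    	"golang.org/x/crypto/openpgp"
    )

    // encryptFile streams src through a symmetric OpenPGP writer into dst
    // instead of holding both plaintext and ciphertext in memory.
    func encryptFile(src, dst, passphrase string) error {
    	in, err := os.Open(src)
    	if err != nil {
    		return err
    	}
    	defer in.Close()

    	out, err := os.Create(dst)
    	if err != nil {
    		return err
    	}
    	defer out.Close()

    	pt, err := openpgp.SymmetricallyEncrypt(out, []byte(passphrase), nil, nil)
    	if err != nil {
    		return fmt.Errorf("encryptFile: error creating encrypting writer: %w", err)
    	}
    	if _, err := io.Copy(pt, in); err != nil {
    		pt.Close()
    		return fmt.Errorf("encryptFile: error writing encrypted contents: %w", err)
    	}
    	return pt.Close()
    }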
@@ -412,7 +430,7 @@ func (s *script) pruneOldBackups() error {
 		)
 	}
 
-	var matches []string
+	var matches []os.FileInfo
 	for _, candidate := range candidates {
 		fi, err := os.Stat(candidate)
 		if err != nil {
@@ -424,14 +442,14 @@ func (s *script) pruneOldBackups() error {
 		}
 		if fi.ModTime().Before(deadline) {
-			matches = append(matches, candidate)
+			matches = append(matches, fi)
 		}
 	}
 
 	if len(matches) != 0 && len(matches) != len(candidates) {
 		var errors []error
 		for _, candidate := range matches {
-			if err := os.Remove(candidate); err != nil {
+			if err := os.Remove(candidate.Name()); err != nil {
 				errors = append(errors, err)
 			}
 		}
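One subtlety in this hunk: matches now holds os.FileInfo values, and removal calls os.Remove(candidate.Name()), but FileInfo.Name() yields only the base name, so deletion resolves relative to the process working directory rather than the backup directory. A sketch that keeps the full path next to the stat result (prunable and collectPrunable are illustrative names, not part of this diff):

    import (
    	"os"
    	"time"
    )

    // prunable pairs a candidate's full path with its FileInfo so removal
    // does not depend on the process working directory.
    type prunable struct {
    	path string
    	info os.FileInfo
    }

    func collectPrunable(candidates []string, deadline time.Time) ([]prunable, error) {
    	var matches []prunable
    	for _, candidate := range candidates {
    		fi, err := os.Stat(candidate)
    		if err != nil {
    			return nil, err
    		}
    		if fi.ModTime().Before(deadline) {
    			matches = append(matches, prunable{path: candidate, info: fi})
    		}
    	}
    	return matches, nil
    }

Removal would then iterate the matches and call os.Remove(match.path).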
@@ -469,26 +487,6 @@ func (s *script) must(err error) {
 	}
 }
 
-// lock opens a lockfile at the given location, keeping it locked until the
-// caller invokes the returned release func. When invoked while the file is
-// still locked the function panics.
-func lock(lockfile string) func() error {
-	lf, err := os.OpenFile(lockfile, os.O_CREATE|os.O_RDWR, os.ModeAppend)
-	if err != nil {
-		panic(err)
-	}
-	return func() error {
-		if err := lf.Close(); err != nil {
-			return fmt.Errorf("lock: error releasing file lock: %w", err)
-		}
-		if err := os.Remove(lockfile); err != nil {
-			return fmt.Errorf("lock: error removing lock file: %w", err)
-		}
-		return nil
-	}
-}
-
-// copy creates a copy of the file located at `dst` at `src`.
 func copy(src, dst string) error {
 	in, err := os.Open(src)
 	if err != nil {
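The hunk above only shows the head of copy (note the removed doc comment also had src and dst reversed). For reference, a conventional io.Copy-based implementation of such a helper might look like this; a sketch, not necessarily the file's actual body:

    import (
    	"io"
    	"os"
    )

    // copy writes a copy of the file at src to dst.
    func copy(src, dst string) error {
    	in, err := os.Open(src)
    	if err != nil {
    		return err
    	}
    	defer in.Close()
    	out, err := os.Create(dst)
    	if err != nil {
    		return err
    	}
    	if _, err := io.Copy(out, in); err != nil {
    		out.Close()
    		return err
    	}
    	return out.Close()
    }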


@@ -9,6 +9,7 @@
 set -e
 
 # Write cronjob env to file, fill in sensible defaults, and read them back in
+mkdir -p /etc/backup
 cat <<EOF > /etc/backup.env
 BACKUP_SOURCES="${BACKUP_SOURCES:-/backup}"
 BACKUP_CRON_EXPRESSION="${BACKUP_CRON_EXPRESSION:-@daily}"
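The entrypoint materializes defaults like ${BACKUP_SOURCES:-/backup} into /etc/backup.env before the Go binary reads its configuration. On the Go side, the equivalent fallback is a one-liner; envOr here is a hypothetical helper shown only to mirror the shell expansion, not code from this diff:

    import "os"

    // envOr returns the environment value for key, falling back to def
    // when the variable is unset or empty, mirroring "${VAR:-default}".
    func envOr(key, def string) string {
    	if v := os.Getenv(key); v != "" {
    		return v
    	}
    	return def
    }

    // e.g. sources := envOr("BACKUP_SOURCES", "/backup")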


@@ -28,7 +28,6 @@ services:
       BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
       BACKUP_PRUNING_LEEWAY: 5s
       BACKUP_PRUNING_PREFIX: test
-      GPG_PASSPHRASE: 1234secret
     volumes:
       - ./local:/archive
       - app_data:/backup/app_data:ro


@@ -13,13 +13,11 @@ docker-compose exec backup backup
 
 docker run --rm -it \
   -v compose_backup_data:/data alpine \
-  ash -c 'apk add gnupg && echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /data/backup/test.tar.gz.gpg > /tmp/test.tar.gz && tar -xf /tmp/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
+  ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
 
 echo "[TEST:PASS] Found relevant files in untared remote backup."
 
-echo 1234secret | gpg -d --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
-tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
-rm ./local/decrypted.tar.gz
+tar -xf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
 
 echo "[TEST:PASS] Found relevant files in untared local backup."
@@ -31,6 +29,8 @@ fi
 
 echo "[TEST:PASS] All containers running post backup."
 
+docker-compose down
+
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
 # TODO: find out if we can test actual deletion without having to wait for a day