Mirror of https://github.com/offen/docker-volume-backup.git, synced 2025-12-05 17:18:02 +01:00.

Compare commits (9 commits):
- 5245b5882f
- 7f0f173115
- ad7ec58322
- b7ab2fbacc
- 789fc656e8
- c59b40f2df
- cff418e735
- d7ccdd79fc
- bd73a2b5e4
README.md (44 changed lines)
```diff
@@ -17,6 +17,7 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
 - [Send email notifications on failed backup runs](#send-email-notifications-on-failed-backup-runs)
 - [Encrypting your backup using GPG](#encrypting-your-backup-using-gpg)
 - [Restoring a volume from a backup](#restoring-a-volume-from-a-backup)
+- [Set the timezone the container runs in](#set-the-timezone-the-container-runs-in)
 - [Using with Docker Swarm](#using-with-docker-swarm)
 - [Manually triggering a backup](#manually-triggering-a-backup)
 - [Recipes](#recipes)
@@ -120,6 +121,11 @@ You can populate below template according to your requirements and use it as you
 
 # BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.tar.gz"
 
+# When storing local backups, a symlink to the latest backup can be created
+# in case a value is given for this key. This has no effect on remote backups.
+
+# BACKUP_LATEST_SYMLINK="backup.latest.tar.gz"
+
 ########### BACKUP STORAGE
 
 # The name of the remote bucket that should be used for storing backups. If
```
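The template addition above introduces `BACKUP_LATEST_SYMLINK`, which gives downstream tooling a stable name for the most recent archive. A minimal Go sketch of how a consumer might resolve that link; the `/archive` path matches the `BACKUP_ARCHIVE` default from the config struct further down, but everything else here is illustrative and not part of the commit:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	// Hypothetical location: BACKUP_ARCHIVE defaults to /archive in this image,
	// and the symlink name below matches the template's example value.
	symlink := "/archive/backup.latest.tar.gz"

	// Readlink returns the symlink's target, assumed here to be the bare
	// file name of the newest backup within the same directory.
	target, err := os.Readlink(symlink)
	if err != nil {
		log.Fatalf("resolving latest backup: %v", err)
	}
	fmt.Println("latest backup:", filepath.Join(filepath.Dir(symlink), target))
}
```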
```diff
@@ -134,6 +140,13 @@ You can populate below template according to your requirements and use it as you
 # AWS_ACCESS_KEY_ID="<xxx>"
 # AWS_SECRET_ACCESS_KEY="<xxx>"
 
+# Instead of providing static credentials, you can also use IAM instance profiles
+# or similar to provide authentication. Some possible configuration options on AWS:
+# - EC2: http://169.254.169.254
+# - ECS: http://169.254.170.2
+
+# AWS_IAM_ROLE_ENDPOINT="http://169.254.169.254"
+
 # This is the FQDN of your storage server, e.g. `storage.example.com`.
 # Do not set this when working against AWS S3 (the default value is
 # `s3.amazonaws.com`). If you need to set a specific (non-https) protocol, you
```
```diff
@@ -149,7 +162,8 @@ You can populate below template according to your requirements and use it as you
 
 # Setting this variable to `true` will disable verification of
 # SSL certificates. You shouldn't use this unless you use self-signed
-# certificates for your remote storage backend.
+# certificates for your remote storage backend. This can only be used
+# when AWS_ENDPOINT_PROTO is set to `https`.
 
 # AWS_ENDPOINT_INSECURE="true"
 
```
```diff
@@ -346,6 +360,27 @@ In case you need to restore a volume from a backup, the most straight forward pr
 
 Depending on your setup and the application(s) you are running, this might involve other steps to be taken still.
 
+### Set the timezone the container runs in
+
+By default a container based on this image will run in the UTC timezone.
+As the image is designed to be as small as possible, additional timezone data is not included.
+In case you want to run your cron rules in your local timezone (respecting DST and similar), you can mount your Docker host's `/etc/timezone` and `/etc/localtime` in read-only mode:
+
+```yml
+version: '3'
+
+services:
+  backup:
+    image: offen/docker-volume-backup:latest
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /etc/timezone:/etc/timezone:ro
+      - /etc/localtime:/etc/localtime:ro
+
+volumes:
+  data:
+```
+
 ### Using with Docker Swarm
 
 By default, Docker Swarm will restart stopped containers automatically, even when manually stopped.
```
```diff
@@ -430,6 +465,9 @@ services:
   # ... define other services using the `data` volume here
   backup:
     image: offen/docker-volume-backup:latest
+    environment:
+      BACKUP_FILENAME: backup-%Y-%m-%dT%H-%M-%S.tar.gz
+      BACKUP_LATEST_SYMLINK: backup-latest.tar.gz
     volumes:
       - data:/backup/my-app-backup:ro
       - /var/run/docker.sock:/var/run/docker.sock:ro
```
```diff
@@ -567,7 +605,7 @@ volumes:
 
 ## Differences to `futurice/docker-volume-backup`
 
-This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
+This image is heavily inspired by `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
 
 - The original image is based on `ubuntu` and requires additional tools, making it heavy.
   This version is roughly 1/25 in compressed size (it's ~12MB).
@@ -578,3 +616,5 @@ Local copies of backups can also be pruned once they reach a certain age.
 - InfluxDB specific functionality from the original image was removed.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.
+- Notifications on failed backups are supported
+- IAM authentication through instance profiles is supported
```
```diff
@@ -89,6 +89,7 @@ type script struct {
 type config struct {
 	BackupSources              string        `split_words:"true" default:"/backup"`
 	BackupFilename             string        `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
+	BackupLatestSymlink        string        `split_words:"true"`
 	BackupArchive              string        `split_words:"true" default:"/archive"`
 	BackupRetentionDays        int32         `split_words:"true" default:"-1"`
 	BackupPruningLeeway        time.Duration `split_words:"true" default:"1m"`
@@ -100,6 +101,7 @@ type config struct {
 	AwsEndpointInsecure        bool          `split_words:"true"`
 	AwsAccessKeyID             string        `envconfig:"AWS_ACCESS_KEY_ID"`
 	AwsSecretAccessKey         string        `split_words:"true"`
+	AwsIamRoleEndpoint         string        `split_words:"true"`
 	GpgPassphrase              string        `split_words:"true"`
 	EmailNotificationRecipient string        `split_words:"true"`
 	EmailNotificationSender    string        `split_words:"true" default:"noreply@nohost"`
```
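The `split_words:"true"` tags are what connect these fields to the environment variables documented in the README: the field name is split on CamelCase boundaries and upper-cased, so `BackupLatestSymlink` is populated from `BACKUP_LATEST_SYMLINK`. A minimal sketch of that mapping, assuming the `github.com/kelseyhightower/envconfig` package, whose tag names match the ones used here:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/kelseyhightower/envconfig"
)

// A trimmed-down version of the config struct from the diff above.
type config struct {
	BackupLatestSymlink string `split_words:"true"`
	AwsIamRoleEndpoint  string `split_words:"true"`
}

func main() {
	// split_words:"true" turns CamelCase field names into
	// underscore-separated environment variable names.
	os.Setenv("BACKUP_LATEST_SYMLINK", "backup.latest.tar.gz")
	os.Setenv("AWS_IAM_ROLE_ENDPOINT", "http://169.254.169.254")

	var c config
	if err := envconfig.Process("", &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.BackupLatestSymlink, c.AwsIamRoleEndpoint)
}
```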
```diff
@@ -145,14 +147,38 @@ func newScript() (*script, error) {
 	}
 
 	if s.c.AwsS3BucketName != "" {
-		mc, err := minio.New(s.c.AwsEndpoint, &minio.Options{
-			Creds: credentials.NewStaticV4(
+		var creds *credentials.Credentials
+		if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" {
+			creds = credentials.NewStaticV4(
 				s.c.AwsAccessKeyID,
 				s.c.AwsSecretAccessKey,
 				"",
-			),
-			Secure: !s.c.AwsEndpointInsecure && s.c.AwsEndpointProto == "https",
-		})
+			)
+		} else if s.c.AwsIamRoleEndpoint != "" {
+			creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint)
+		} else {
+			return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
+		}
+
+		options := minio.Options{
+			Creds:  creds,
+			Secure: s.c.AwsEndpointProto == "https",
+		}
+
+		if s.c.AwsEndpointInsecure {
+			if !options.Secure {
+				return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
+			}
+
+			transport, err := minio.DefaultTransport(true)
+			if err != nil {
+				return nil, fmt.Errorf("newScript: failed to create default minio transport")
+			}
+			transport.TLSClientConfig.InsecureSkipVerify = true
+			options.Transport = transport
+		}
+
+		mc, err := minio.New(s.c.AwsEndpoint, &options)
 		if err != nil {
 			return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
 		}
```
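The rewritten client setup selects credentials with a fixed precedence: static keys win over an IAM role endpoint, and providing neither while `AWS_S3_BUCKET_NAME` is set becomes a hard error. A standalone sketch of just that decision logic; the function and variable names are illustrative, not taken from the commit:

```go
package main

import (
	"errors"
	"fmt"
)

// credentialSource mirrors the precedence introduced in the hunk above:
// static keys first, then an IAM role endpoint, otherwise an error.
func credentialSource(accessKey, secretKey, iamEndpoint string) (string, error) {
	switch {
	case accessKey != "" && secretKey != "":
		return "static", nil
	case iamEndpoint != "":
		return "iam", nil
	default:
		return "", errors.New("AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
	}
}

func main() {
	for _, tc := range []struct{ key, secret, iam string }{
		{"AKIA...", "secret", ""},          // static credentials win
		{"", "", "http://169.254.169.254"}, // falls back to the IAM endpoint
		{"", "", ""},                       // neither: configuration error
	} {
		src, err := credentialSource(tc.key, tc.secret, tc.iam)
		fmt.Println(src, err)
	}
}
```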
```diff
@@ -370,6 +396,16 @@ func (s *script) copyBackup() error {
 			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
 		}
 		s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive)
+		if s.c.BackupLatestSymlink != "" {
+			symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink)
+			if _, err := os.Lstat(symlink); err == nil {
+				os.Remove(symlink)
+			}
+			if err := os.Symlink(name, symlink); err != nil {
+				return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
+			}
+			s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink)
+		}
 	}
 	return nil
 }
```
```diff
@@ -487,7 +523,7 @@ func (s *script) pruneOldBackups() error {
 			)
 		}
 
-		if fi.ModTime().Before(deadline) {
+		if fi.Mode() != os.ModeSymlink && fi.ModTime().Before(deadline) {
 			matches = append(matches, candidate)
 		}
 	}
```
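This guard keeps the new latest-backup symlink out of the pruning candidates, so age-based deletion only ever touches real archive files. For context when reading the check: `os.FileMode` is a bit field, so symlinks are conventionally detected with a bitwise AND, and the symlink bit is only visible when the `FileInfo` comes from `os.Lstat`, since `os.Stat` follows links. A small self-contained demonstration, independent of the commit (file names are made up):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "prune-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	file := filepath.Join(dir, "backup-2021-08-24.tar.gz")
	link := filepath.Join(dir, "backup.latest.tar.gz")
	if err := os.WriteFile(file, []byte("archive"), 0o600); err != nil {
		log.Fatal(err)
	}
	if err := os.Symlink("backup-2021-08-24.tar.gz", link); err != nil {
		log.Fatal(err)
	}

	for _, candidate := range []string{file, link} {
		// Lstat reports on the link itself; Stat would follow it to the target.
		fi, err := os.Lstat(candidate)
		if err != nil {
			log.Fatal(err)
		}
		// Bitwise test: ModeSymlink is one flag inside the FileMode bit field.
		isSymlink := fi.Mode()&os.ModeSymlink != 0
		fmt.Printf("%s symlink=%v\n", filepath.Base(candidate), isSymlink)
	}
}
```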
```diff
@@ -24,6 +24,7 @@ services:
       AWS_ENDPOINT_PROTO: http
       AWS_S3_BUCKET_NAME: backup
       BACKUP_FILENAME: test.tar.gz
+      BACKUP_LATEST_SYMLINK: test.latest.tar.gz.gpg
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
       BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
       BACKUP_PRUNING_LEEWAY: 5s
```
```diff
@@ -18,6 +18,7 @@ docker run --rm -it \
 
 echo "[TEST:PASS] Found relevant files in untared remote backup."
 
+test -L ./local/test.latest.tar.gz.gpg
 echo 1234secret | gpg -d --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
 tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
 rm ./local/decrypted.tar.gz
```