Compare commits

...

14 Commits

Author SHA1 Message Date
Frederik Ring
0c666d0c88 use lstat when checking whether file is a symlink 2021-11-03 18:07:55 +01:00
Frederik Ring
a0402b407d fix fileinfo mode comparison when checking for symlinks 2021-11-03 18:03:44 +01:00
Frederik Ring
3193e88fc0 os.FileInfo cannot be used for deleting files as it does not contain a full path 2021-11-02 06:40:37 +01:00
Frederik Ring
c391230be6 Merge pull request #31 from offen/exclude-symlink-candidates
Exclude symlinks from candidates when pruning local files
2021-10-31 20:07:51 +01:00
Frederik Ring
f946f36fb0 exclude symlinks from candidates when pruning local files
Previously, symlinks would be included in the set of candidates, but would
be skipped when pruning. This could lead to a wrong number of candidates
being printed in the log messages.
2021-10-29 09:00:37 +02:00
Frederik Ring
5245b5882f update README, save some indentation 2021-10-28 19:55:39 +02:00
schwannden
7f0f173115 adding option to skip tls verification error (#30)
* adding option to skip tls verification error

* merge options

* removed merged option from README

Co-authored-by: Schwannden Kuo <schwannden@mobagel.com>
2021-10-28 19:51:35 +02:00
Frederik Ring
ad7ec58322 add syntax highlighting 2021-10-23 17:45:57 +02:00
Frederik Ring
b7ab2fbacc add section about container timezones to the README 2021-10-23 17:44:30 +02:00
Frederik Ring
789fc656e8 Merge pull request #27 from offen/latest-symlink
Automatically create symlink to latest local backup if configured
2021-10-01 18:47:16 +02:00
Frederik Ring
c59b40f2df automatically create symlink to latest local backup if configured 2021-10-01 18:19:24 +02:00
Frederik Ring
cff418e735 fix README grammar 2021-10-01 08:48:20 +02:00
Frederik Ring
d7ccdd79fc Merge pull request #26 from offen/instance-profile
Allow s3 authentication via IAM role
2021-09-30 19:32:54 +02:00
Frederik Ring
bd73a2b5e4 allow s3 authentication via IAM role 2021-09-30 19:24:43 +02:00
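The three symlink-related commits above (f946f36fb0, a0402b407d and 0c666d0c88) all hinge on the same standard library detail: `os.Stat` follows a symlink and describes its target, so the returned `FileInfo` never carries the `os.ModeSymlink` bit, while `os.Lstat` describes the link itself. A mode value also contains permission bits, which is why the final diff masks it with `os.ModeSymlink` instead of comparing modes for equality. A minimal sketch of the difference (file names are hypothetical):

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// isSymlink reports whether the given stat function sees name as a symlink.
// The mode is masked rather than compared directly, since it also carries
// permission bits.
func isSymlink(statFn func(string) (os.FileInfo, error), name string) bool {
	fi, err := statFn(name)
	if err != nil {
		log.Fatal(err)
	}
	return fi.Mode()&os.ModeSymlink == os.ModeSymlink
}

func main() {
	// Hypothetical setup: a regular backup file plus a "latest" symlink,
	// mirroring what BACKUP_LATEST_SYMLINK produces.
	if err := os.WriteFile("backup.tar.gz", []byte("archive"), 0o644); err != nil {
		log.Fatal(err)
	}
	if err := os.Symlink("backup.tar.gz", "backup.latest.tar.gz"); err != nil {
		log.Fatal(err)
	}

	fmt.Println(isSymlink(os.Stat, "backup.latest.tar.gz"))  // false: Stat follows the link
	fmt.Println(isSymlink(os.Lstat, "backup.latest.tar.gz")) // true: Lstat sees the link itself
}
```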
4 changed files with 110 additions and 13 deletions

View File

@@ -17,6 +17,7 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
 - [Send email notifications on failed backup runs](#send-email-notifications-on-failed-backup-runs)
 - [Encrypting your backup using GPG](#encrypting-your-backup-using-gpg)
 - [Restoring a volume from a backup](#restoring-a-volume-from-a-backup)
+- [Set the timezone the container runs in](#set-the-timezone-the-container-runs-in)
 - [Using with Docker Swarm](#using-with-docker-swarm)
 - [Manually triggering a backup](#manually-triggering-a-backup)
 - [Recipes](#recipes)
@@ -120,6 +121,11 @@ You can populate below template according to your requirements and use it as you
 # BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.tar.gz"
 
+# When storing local backups, a symlink to the latest backup can be created
+# in case a value is given for this key. This has no effect on remote backups.
+# BACKUP_LATEST_SYMLINK="backup.latest.tar.gz"
+
 ########### BACKUP STORAGE
 
 # The name of the remote bucket that should be used for storing backups. If
@@ -134,6 +140,13 @@ You can populate below template according to your requirements and use it as you
 # AWS_ACCESS_KEY_ID="<xxx>"
 # AWS_SECRET_ACCESS_KEY="<xxx>"
 
+# Instead of providing static credentials, you can also use IAM instance profiles
+# or similar to provide authentication. Some possible configuration options on AWS:
+# - EC2: http://169.254.169.254
+# - ECS: http://169.254.170.2
+
+# AWS_IAM_ROLE_ENDPOINT="http://169.254.169.254"
+
 # This is the FQDN of your storage server, e.g. `storage.example.com`.
 # Do not set this when working against AWS S3 (the default value is
 # `s3.amazonaws.com`). If you need to set a specific (non-https) protocol, you
@@ -149,7 +162,8 @@ You can populate below template according to your requirements and use it as you
 # Setting this variable to `true` will disable verification of
 # SSL certificates. You shouldn't use this unless you use self-signed
-# certificates for your remote storage backend.
+# certificates for your remote storage backend. This can only be used
+# when AWS_ENDPOINT_PROTO is set to `https`.
 # AWS_ENDPOINT_INSECURE="true"
@@ -346,6 +360,27 @@ In case you need to restore a volume from a backup, the most straight forward pr
 Depending on your setup and the application(s) you are running, this might involve other steps to be taken still.
 
+### Set the timezone the container runs in
+
+By default a container based on this image will run in the UTC timezone.
+As the image is designed to be as small as possible, additional timezone data is not included.
+In case you want to run your cron rules in your local timezone (respecting DST and similar), you can mount your Docker host's `/etc/timezone` and `/etc/localtime` in read-only mode:
+
+```yml
+version: '3'
+
+services:
+  backup:
+    image: offen/docker-volume-backup:latest
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /etc/timezone:/etc/timezone:ro
+      - /etc/localtime:/etc/localtime:ro
+
+volumes:
+  data:
+```
+
 ### Using with Docker Swarm
 
 By default, Docker Swarm will restart stopped containers automatically, even when manually stopped.
@@ -430,6 +465,9 @@ services:
   # ... define other services using the `data` volume here
   backup:
     image: offen/docker-volume-backup:latest
+    environment:
+      BACKUP_FILENAME: backup-%Y-%m-%dT%H-%M-%S.tar.gz
+      BACKUP_LATEST_SYMLINK: backup-latest.tar.gz
     volumes:
       - data:/backup/my-app-backup:ro
       - /var/run/docker.sock:/var/run/docker.sock:ro
@@ -567,7 +605,7 @@ volumes:
 ## Differences to `futurice/docker-volume-backup`
 
-This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
+This image is heavily inspired by `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
 
 - The original image is based on `ubuntu` and requires additional tools, making it heavy.
   This version is roughly 1/25 in compressed size (it's ~12MB).
@@ -578,3 +616,5 @@ Local copies of backups can also be pruned once they reach a certain age.
 - InfluxDB specific functionality from the original image was removed.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.
+- Notifications on failed backups are supported
+- IAM authentication through instance profiles is supported
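
To make the new IAM documentation concrete: when `AWS_IAM_ROLE_ENDPOINT` is set, no static key pair is needed, because credentials are fetched from the instance metadata service at runtime. A rough sketch using minio-go, the client library the project already uses; the endpoint values match the README template above, the rest is illustrative:

```go
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Credentials are resolved from the EC2 instance metadata service at
	// request time; on ECS the endpoint would be http://169.254.170.2.
	mc, err := minio.New("s3.amazonaws.com", &minio.Options{
		Creds:  credentials.NewIAM("http://169.254.169.254"),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("client ready for %s", mc.EndpointURL())
}
```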

View File

@@ -89,6 +89,7 @@ type script struct {
 type config struct {
 	BackupSources              string        `split_words:"true" default:"/backup"`
 	BackupFilename             string        `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
+	BackupLatestSymlink        string        `split_words:"true"`
 	BackupArchive              string        `split_words:"true" default:"/archive"`
 	BackupRetentionDays        int32         `split_words:"true" default:"-1"`
 	BackupPruningLeeway        time.Duration `split_words:"true" default:"1m"`
@@ -100,6 +101,7 @@ type config struct {
 	AwsEndpointInsecure        bool          `split_words:"true"`
 	AwsAccessKeyID             string        `envconfig:"AWS_ACCESS_KEY_ID"`
 	AwsSecretAccessKey         string        `split_words:"true"`
+	AwsIamRoleEndpoint         string        `split_words:"true"`
 	GpgPassphrase              string        `split_words:"true"`
 	EmailNotificationRecipient string        `split_words:"true"`
 	EmailNotificationSender    string        `split_words:"true" default:"noreply@nohost"`
@@ -145,14 +147,38 @@ func newScript() (*script, error) {
 	}
 	if s.c.AwsS3BucketName != "" {
-		mc, err := minio.New(s.c.AwsEndpoint, &minio.Options{
-			Creds: credentials.NewStaticV4(
+		var creds *credentials.Credentials
+		if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" {
+			creds = credentials.NewStaticV4(
 				s.c.AwsAccessKeyID,
 				s.c.AwsSecretAccessKey,
 				"",
-			),
-			Secure: !s.c.AwsEndpointInsecure && s.c.AwsEndpointProto == "https",
-		})
+			)
+		} else if s.c.AwsIamRoleEndpoint != "" {
+			creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint)
+		} else {
+			return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
+		}
+
+		options := minio.Options{
+			Creds:  creds,
+			Secure: s.c.AwsEndpointProto == "https",
+		}
+
+		if s.c.AwsEndpointInsecure {
+			if !options.Secure {
+				return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
+			}
+			transport, err := minio.DefaultTransport(true)
+			if err != nil {
+				return nil, fmt.Errorf("newScript: failed to create default minio transport")
+			}
+			transport.TLSClientConfig.InsecureSkipVerify = true
+			options.Transport = transport
+		}
+
+		mc, err := minio.New(s.c.AwsEndpoint, &options)
 		if err != nil {
 			return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
 		}
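
One detail worth spelling out in the hunk above: `AWS_ENDPOINT_INSECURE` no longer feeds into `Secure` at all. The connection stays on TLS whenever the protocol is `https`; the flag merely swaps in a transport that skips certificate verification, and it is rejected outright for plain-http endpoints. The same pattern, sketched with just the standard library instead of minio's `DefaultTransport` helper:

```go
package main

import (
	"crypto/tls"
	"net/http"
)

// insecureTransport keeps the connection on TLS but accepts self-signed
// certificates, the same effect the diff achieves via
// minio.DefaultTransport(true) plus InsecureSkipVerify.
func insecureTransport() *http.Transport {
	t := http.DefaultTransport.(*http.Transport).Clone()
	t.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	return t
}

func main() {
	client := &http.Client{Transport: insecureTransport()}
	_ = client // use against an https endpoint with a self-signed certificate
}
```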
@@ -370,6 +396,16 @@ func (s *script) copyBackup() error {
 			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
 		}
 		s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive)
+		if s.c.BackupLatestSymlink != "" {
+			symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink)
+			if _, err := os.Lstat(symlink); err == nil {
+				os.Remove(symlink)
+			}
+			if err := os.Symlink(name, symlink); err != nil {
+				return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
+			}
+			s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink)
+		}
 	}
 	return nil
 }
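
The Lstat-then-Remove sequence above exists because `os.Symlink` never overwrites: it fails with "file exists" when the link name is already present, so a previous "latest" link has to be removed first. The same create-or-replace pattern in isolation (names are hypothetical):

```go
package main

import (
	"log"
	"os"
)

// replaceSymlink re-points linkName at target. os.Symlink refuses to
// overwrite an existing name, so any previous link is removed first.
// Note this is not atomic: a reader could briefly see no link at all.
func replaceSymlink(target, linkName string) error {
	if _, err := os.Lstat(linkName); err == nil {
		if err := os.Remove(linkName); err != nil {
			return err
		}
	}
	return os.Symlink(target, linkName)
}

func main() {
	if err := replaceSymlink("backup-2021-11-03.tar.gz", "backup.latest.tar.gz"); err != nil {
		log.Fatal(err)
	}
}
```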
@@ -467,15 +503,35 @@ func (s *script) pruneOldBackups() error {
 	}
 	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
-		candidates, err := filepath.Glob(
-			path.Join(s.c.BackupArchive, fmt.Sprintf("%s*", s.c.BackupPruningPrefix)),
+		globPattern := path.Join(
+			s.c.BackupArchive,
+			fmt.Sprintf("%s*", s.c.BackupPruningPrefix),
 		)
+		globMatches, err := filepath.Glob(globPattern)
 		if err != nil {
 			return fmt.Errorf(
-				"pruneOldBackups: error looking up matching files, starting with: %w", err,
+				"pruneOldBackups: error looking up matching files using pattern %s: %w",
+				globPattern,
+				err,
 			)
 		}
+
+		var candidates []string
+		for _, candidate := range globMatches {
+			fi, err := os.Lstat(candidate)
+			if err != nil {
+				return fmt.Errorf(
+					"pruneOldBackups: error calling Lstat on file %s: %w",
+					candidate,
+					err,
+				)
+			}
+			if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
+				candidates = append(candidates, candidate)
+			}
+		}
+
 		var matches []string
 		for _, candidate := range candidates {
 			fi, err := os.Stat(candidate)
@@ -486,7 +542,6 @@ func (s *script) pruneOldBackups() error {
 					err,
 				)
 			}
-
 			if fi.ModTime().Before(deadline) {
 				matches = append(matches, candidate)
 			}
@@ -494,8 +549,8 @@ func (s *script) pruneOldBackups() error {
 		if len(matches) != 0 && len(matches) != len(candidates) {
 			var removeErrors []error
-			for _, candidate := range matches {
-				if err := os.Remove(candidate); err != nil {
+			for _, match := range matches {
+				if err := os.Remove(match); err != nil {
 					removeErrors = append(removeErrors, err)
 				}
 			}
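
Read together, the pruning hunks boil down to a three-step flow: glob for files carrying the configured prefix, exclude symlinks from the candidate set via `Lstat`, then remove whatever is older than the deadline. A condensed, self-contained sketch (not the project's code: it folds the two loops into one, which is equivalent since `Lstat` and `Stat` agree for anything that is not a symlink):

```go
package main

import (
	"os"
	"path"
	"path/filepath"
	"time"
)

// expiredBackups mirrors the candidate selection in pruneOldBackups: glob
// for files starting with prefix, skip symlinks (e.g. the "latest" link),
// and keep only files whose mtime lies before the deadline.
func expiredBackups(archive, prefix string, deadline time.Time) ([]string, error) {
	globMatches, err := filepath.Glob(path.Join(archive, prefix+"*"))
	if err != nil {
		return nil, err
	}
	var matches []string
	for _, candidate := range globMatches {
		fi, err := os.Lstat(candidate)
		if err != nil {
			return nil, err
		}
		if fi.Mode()&os.ModeSymlink != 0 {
			continue // symlinks must never count as prune candidates
		}
		if fi.ModTime().Before(deadline) {
			matches = append(matches, candidate)
		}
	}
	return matches, nil
}
```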

View File

@@ -24,6 +24,7 @@ services:
       AWS_ENDPOINT_PROTO: http
       AWS_S3_BUCKET_NAME: backup
       BACKUP_FILENAME: test.tar.gz
+      BACKUP_LATEST_SYMLINK: test.latest.tar.gz.gpg
       BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
       BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
       BACKUP_PRUNING_LEEWAY: 5s

View File

@@ -18,6 +18,7 @@ docker run --rm -it \
 echo "[TEST:PASS] Found relevant files in untared remote backup."
 
+test -L ./local/test.latest.tar.gz.gpg
 echo 1234secret | gpg -d --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
 tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
 rm ./local/decrypted.tar.gz