Mirror of https://github.com/offen/docker-volume-backup.git, synced 2025-12-05 17:18:02 +01:00

Compare commits (18 commits)
| SHA1 |
|---|
| a57e93d01e |
| 3e17d1b123 |
| 0e248010a8 |
| e6af6efd8a |
| 34d04211eb |
| 8dfdd14527 |
| 3bb99a7117 |
| ddc34be55d |
| cb9b4bfcff |
| 62bd2f4a5a |
| 6fe629ce87 |
| 1db896f7cf |
| 6ded00aa06 |
| 6b79f1914b |
| 40ff2e00c9 |
| 760cc9cebc |
| 1f9582df51 |
| 32575c831e |
.github/ISSUE_TEMPLATE.md (vendored, new file, 20 lines)

````diff
@@ -0,0 +1,20 @@
+* **I'm submitting a ...**
+  - [ ] bug report
+  - [ ] feature request
+  - [ ] support request
+
+* **What is the current behavior?**
+
+* **If the current behavior is a bug, please provide the configuration and steps to reproduce and if possible a minimal demo of the problem.**
+
+* **What is the expected behavior?**
+
+* **What is the motivation / use case for changing the behavior?**
+
+* **Please tell us about your environment:**
+
+  - Image version:
+  - Docker version:
+  - docker-compose version:
+
+* **Other information** (e.g. detailed explanation, stacktraces, related issues, suggestions how to fix, links for us to have context, eg. stackoverflow, etc)
````
Dockerfile

````diff
@@ -6,16 +6,17 @@ FROM golang:1.17-alpine as builder
 WORKDIR /app
 COPY go.mod go.sum ./
 RUN go mod download
-COPY cmd/backup/main.go ./cmd/backup/main.go
-RUN go build -o backup cmd/backup/main.go
+COPY cmd/backup ./cmd/backup/
+WORKDIR /app/cmd/backup
+RUN go build -o backup .
 
-FROM alpine:3.14
+FROM alpine:3.15
 
 WORKDIR /root
 
 RUN apk add --update ca-certificates
 
-COPY --from=builder /app/backup /usr/bin/backup
+COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
 
 COPY ./entrypoint.sh /root/
 RUN chmod +x entrypoint.sh
````
README.md (130 lines changed)

````diff
@@ -1,9 +1,13 @@
+<a href="https://www.offen.dev/">
+  <img src="https://offen.github.io/press-kit/offen-material/gfx-GitHub-Offen-logo.svg" alt="Offen logo" title="Offen" width="150px"/>
+</a>
+
 # docker-volume-backup
 
 Backup Docker volumes locally or to any S3 compatible storage.
 
 The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) sidecar container to an existing Docker setup.
-It handles __recurring or one-off backups of Docker volumes__ to a __local directory__ or __any S3 compatible storage__ (or both), and __rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
+It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3 or WebDAV compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
 
 <!-- MarkdownTOC -->
 
````
````diff
@@ -15,6 +19,7 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
 - [Stopping containers during backup](#stopping-containers-during-backup)
 - [Automatically pruning old backups](#automatically-pruning-old-backups)
 - [Send email notifications on failed backup runs](#send-email-notifications-on-failed-backup-runs)
+- [Customize notifications](#customize-notifications)
 - [Encrypting your backup using GPG](#encrypting-your-backup-using-gpg)
 - [Restoring a volume from a backup](#restoring-a-volume-from-a-backup)
 - [Set the timezone the container runs in](#set-the-timezone-the-container-runs-in)
@@ -23,7 +28,9 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
 - [Update deprecated email configuration](#update-deprecated-email-configuration)
 - [Recipes](#recipes)
   - [Backing up to AWS S3](#backing-up-to-aws-s3)
+  - [Backing up to Filebase](#backing-up-to-filebase)
   - [Backing up to MinIO](#backing-up-to-minio)
+  - [Backing up to WebDAV](#backing-up-to-webdav)
   - [Backing up locally](#backing-up-locally)
   - [Backing up to AWS S3 as well as locally](#backing-up-to-aws-s3-as-well-as-locally)
   - [Running on a custom cron schedule](#running-on-a-custom-cron-schedule)
````
````diff
@@ -123,14 +130,14 @@ You can populate below template according to your requirements and use it as you
 # BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.tar.gz"
 
 # Setting BACKUP_FILENAME_EXPAND to true allows for environment variable
-# placeholders in BACKUP_FILENAME and in BACKUP_LATEST_SYMLINK that will get
-# expanded at runtime, e.g. `backup-$HOSTNAME-%Y-%m-%dT%H-%M-%S.tar.gz`.
-# Expansion happens before interpolating strftime tokens.
-# It is disabled by default.
+# placeholders in BACKUP_FILENAME, BACKUP_LATEST_SYMLINK and in
+# BACKUP_PRUNING_PREFIX that will get expanded at runtime,
+# e.g. `backup-$HOSTNAME-%Y-%m-%dT%H-%M-%S.tar.gz`. Expansion happens before
+# interpolating strftime tokens. It is disabled by default.
 # Please note that you will need to escape the `$` when providing the value
 # in a docker-compose.yml file, i.e. using $$VAR instead of $VAR.
 
-# BACKUP_FILENAME_TEMPLATE="true"
+# BACKUP_FILENAME_EXPAND="true"
 
 # When storing local backups, a symlink to the latest backup can be created
 # in case a value is given for this key. This has no effect on remote backups.
````
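The expansion order documented in this hunk mirrors what `newScript` does in `cmd/backup/main.go` further down (`os.ExpandEnv` first, then `timeutil.Strftime`). A minimal Go sketch of that two-step expansion, using the hypothetical value `myhost` for `$HOSTNAME`:

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/leekchan/timeutil"
)

func main() {
	os.Setenv("HOSTNAME", "myhost") // stand-in value for illustration
	file := "backup-$HOSTNAME-%Y-%m-%dT%H-%M-%S.tar.gz"
	file = os.ExpandEnv(file) // step 1: environment variable placeholders
	now := time.Now()
	file = timeutil.Strftime(&now, file) // step 2: strftime tokens
	fmt.Println(file) // e.g. backup-myhost-2022-01-02T03-04-05.tar.gz
}
```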
````diff
@@ -151,6 +158,11 @@ You can populate below template according to your requirements and use it as you
 
 # AWS_S3_BUCKET_NAME="backup-bucket"
 
+# If you want to store the backup in a non-root location on your bucket
+# you can provide a path. The path must not contain a leading slash.
+
+# AWS_S3_PATH="my/backup/location"
+
 # Define credentials for authenticating against the backup storage and a bucket
 # name. Although all of these keys are `AWS`-prefixed, the setup can be used
 # with any S3 compatible storage.
````
````diff
@@ -166,7 +178,7 @@ You can populate below template according to your requirements and use it as you
 # AWS_IAM_ROLE_ENDPOINT="http://169.254.169.254"
 
 # This is the FQDN of your storage server, e.g. `storage.example.com`.
 # Do not set this when working against AWS S3 (the default value is
 # `s3.amazonaws.com`). If you need to set a specific (non-https) protocol, you
 # will need to use the option below.
 
````
````diff
@@ -185,6 +197,25 @@ You can populate below template according to your requirements and use it as you
 
 # AWS_ENDPOINT_INSECURE="true"
 
+# You can also backup files to any WebDAV server:
+
+# The URL of the remote WebDAV server
+
+# WEBDAV_URL="https://webdav.example.com"
+
+# The Directory to place the backups to on the WebDAV server.
+# If the path is not present on the server it will be created.
+
+# WEBDAV_PATH="/my/directory/"
+
+# The username for the WebDAV server
+
+# WEBDAV_USERNAME="user"
+
+# The password for the WebDAV server
+
+# WEBDAV_PASSWORD="password"
+
 # In addition to storing backups remotely, you can also keep local copies.
 # Pass a container-local path to store your backups if needed. You also need to
 # mount a local folder or Docker volume into that location (`/archive`
````
````diff
@@ -251,8 +282,9 @@ You can populate below template according to your requirements and use it as you
 # Notifications (email, Slack, etc.) can be sent out when a backup run finishes.
 # Configuration is provided as a comma-separated list of URLs as consumed
 # by `shoutrrr`: https://containrrr.dev/shoutrrr/v0.5/services/overview/
-# When providing multiple URLs or an URL that contains a comma, the values
-# can be URL encoded to avoid ambiguities.
+# The content of such notifications can be customized. Dedicated documentation
+# on how to do this can be found in the README. When providing multiple URLs or
+# an URL that contains a comma, the values can be URL encoded to avoid ambiguities.
 
 # The below URL demonstrates how to send an email using the provided SMTP
 # configuration and credentials.
````
````diff
@@ -378,6 +410,30 @@ Refer to the documentation of [shoutrrr][shoutrrr-docs] to find out about option
 
 [shoutrrr-docs]: https://containrrr.dev/shoutrrr/v0.5/services/overview/
 
+### Customize notifications
+
+The title and body of the notifications can be easily tailored to your needs using [go templates](https://pkg.go.dev/text/template).
+Templates must be mounted inside the container in `/etc/dockervolumebackup/notifications.d/`: any file inside this directory will be parsed.
+
+The files have to define [nested templates](https://pkg.go.dev/text/template#hdr-Nested_template_definitions) in order to override the original values. An example:
+```
+{{ define "title_success" -}}
+✅ Successfully ran backup {{ .Config.BackupStopContainerLabel }}
+{{- end }}
+
+{{ define "body_success" -}}
+▶️ Start time: {{ .Stats.StartTime | formatTime }}
+⏹️ End time: {{ .Stats.EndTime | formatTime }}
+⌛ Took time: {{ .Stats.TookTime }}
+🛑 Stopped containers: {{ .Stats.Containers.Stopped }}/{{ .Stats.Containers.All }} ({{ .Stats.Containers.StopErrors }} errors)
+⚖️ Backup size: {{ .Stats.BackupFile.Size | formatBytesBin }} / {{ .Stats.BackupFile.Size | formatBytesDec }}
+🗑️ Pruned backups: {{ .Stats.Storages.Local.Pruned }}/{{ .Stats.Storages.Local.Total }} ({{ .Stats.Storages.Local.PruneErrors }} errors)
+{{- end }}
+```
+
+Overridable template names are: `title_success`, `body_success`, `title_failure`, `body_failure`.
+
+For a full list of available variables and functions, see [this page](https://github.com/offen/docker-volume-backup/blob/master/docs/NOTIFICATION-TEMPLATES.md).
+
 ### Encrypting your backup using GPG
 
````
````diff
@@ -399,14 +455,14 @@ In case you need to restore a volume from a backup, the most straight forward pr
 ```console
 tar -C /tmp -xvf backup.tar.gz
 ```
-- Using a temporary one-off container, mount the volume (the example assumes it's named `data`) and copy over the backup. Make sure you copy the correct path level (this depends on how you mount your volume into the backup container), you might need to strip some leading elements
+- Using a temporary once-off container, mount the volume (the example assumes it's named `data`) and copy over the backup. Make sure you copy the correct path level (this depends on how you mount your volume into the backup container), you might need to strip some leading elements
 ```console
-docker run -d --name backup_restore -v data:/backup_restore alpine
-docker cp /tmp/backup/data-backup backup_restore:/backup_restore
-docker stop backup_restore
-docker rm backup_restore
+docker run -d --name temp_restore_container -v data:/backup_restore alpine
+docker cp /tmp/backup/data-backup temp_restore_container:/backup_restore
+docker stop temp_restore_container
+docker rm temp_restore_container
 ```
 - Restart the container(s) that are using the volume
 
 Depending on your setup and the application(s) you are running, this might involve other steps to be taken still.
 
````
````diff
@@ -504,6 +560,28 @@ volumes:
   data:
 ```
 
+### Backing up to Filebase
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:latest
+    environment:
+      AWS_ENDPOINT: s3.filebase.com
+      AWS_BUCKET_NAME: filebase-bucket
+      AWS_ACCESS_KEY_ID: FILEBASE-ACCESS-KEY
+      AWS_SECRET_ACCESS_KEY: FILEBASE-SECRET-KEY
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+  data:
+```
+
 ### Backing up to MinIO
 
 ```yml
````
````diff
@@ -526,6 +604,28 @@ volumes:
   data:
 ```
 
+### Backing up to WebDAV
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:latest
+    environment:
+      WEBDAV_URL: https://webdav.mydomain.me
+      WEBDAV_PATH: /my/directory/
+      WEBDAV_USERNAME: user
+      WEBDAV_PASSWORD: password
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+  data:
+```
+
 ### Backing up locally
 
 ```yml
````
cmd/backup/config.go (new file, 42 lines)

````diff
@@ -0,0 +1,42 @@
+// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import "time"
+
+// Config holds all configuration values that are expected to be set
+// by users.
+type Config struct {
+	BackupSources              string        `split_words:"true" default:"/backup"`
+	BackupFilename             string        `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
+	BackupFilenameExpand       bool          `split_words:"true"`
+	BackupLatestSymlink        string        `split_words:"true"`
+	BackupArchive              string        `split_words:"true" default:"/archive"`
+	BackupRetentionDays        int32         `split_words:"true" default:"-1"`
+	BackupPruningLeeway        time.Duration `split_words:"true" default:"1m"`
+	BackupPruningPrefix        string        `split_words:"true"`
+	BackupStopContainerLabel   string        `split_words:"true" default:"true"`
+	BackupFromSnapshot         bool          `split_words:"true"`
+	AwsS3BucketName            string        `split_words:"true"`
+	AwsS3Path                  string        `split_words:"true"`
+	AwsEndpoint                string        `split_words:"true" default:"s3.amazonaws.com"`
+	AwsEndpointProto           string        `split_words:"true" default:"https"`
+	AwsEndpointInsecure        bool          `split_words:"true"`
+	AwsAccessKeyID             string        `envconfig:"AWS_ACCESS_KEY_ID"`
+	AwsSecretAccessKey         string        `split_words:"true"`
+	AwsIamRoleEndpoint         string        `split_words:"true"`
+	GpgPassphrase              string        `split_words:"true"`
+	NotificationURLs           []string      `envconfig:"NOTIFICATION_URLS"`
+	NotificationLevel          string        `split_words:"true" default:"error"`
+	EmailNotificationRecipient string        `split_words:"true"`
+	EmailNotificationSender    string        `split_words:"true" default:"noreply@nohost"`
+	EmailSMTPHost              string        `envconfig:"EMAIL_SMTP_HOST"`
+	EmailSMTPPort              int           `envconfig:"EMAIL_SMTP_PORT" default:"587"`
+	EmailSMTPUsername          string        `envconfig:"EMAIL_SMTP_USERNAME"`
+	EmailSMTPPassword          string        `envconfig:"EMAIL_SMTP_PASSWORD"`
+	WebdavUrl                  string        `split_words:"true"`
+	WebdavPath                 string        `split_words:"true" default:"/"`
+	WebdavUsername             string        `split_words:"true"`
+	WebdavPassword             string        `split_words:"true"`
+}
````
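Every field here is resolved by `envconfig` struct tags: `split_words:"true"` maps a CamelCase field to an underscore-separated variable, and `default` fills in unset values. A minimal, self-contained sketch of that resolution (the struct below is a trimmed-down illustration, not the full Config):

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/kelseyhightower/envconfig"
)

// Trimmed-down version of the Config struct above, for illustration only.
type Config struct {
	BackupFilename      string        `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
	BackupRetentionDays int32         `split_words:"true" default:"-1"`
	BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
}

func main() {
	// split_words:"true" makes envconfig read BACKUP_RETENTION_DAYS
	// for the BackupRetentionDays field, and so on.
	os.Setenv("BACKUP_RETENTION_DAYS", "7")
	var c Config
	if err := envconfig.Process("", &c); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(c.BackupFilename, c.BackupRetentionDays, c.BackupPruningLeeway)
	// -> backup-%Y-%m-%dT%H-%M-%S.tar.gz 7 1m0s
}
```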
cmd/backup/hooks.go (new file, 56 lines)

````diff
@@ -0,0 +1,56 @@
+// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"fmt"
+	"sort"
+)
+
+// hook contains a queued action that can be triggered when the script
+// reaches a certain point (e.g. unsuccessful backup)
+type hook struct {
+	level  hookLevel
+	action func(err error) error
+}
+
+type hookLevel int
+
+const (
+	hookLevelPlumbing hookLevel = iota
+	hookLevelError
+	hookLevelInfo
+)
+
+var hookLevels = map[string]hookLevel{
+	"info":  hookLevelInfo,
+	"error": hookLevelError,
+}
+
+// registerHook adds the given action at the given level.
+func (s *script) registerHook(level hookLevel, action func(err error) error) {
+	s.hooks = append(s.hooks, hook{level, action})
+}
+
+// runHooks runs all hooks that have been registered using the
+// given levels in the defined ordering. In case executing a hook returns an
+// error, the following hooks will still be run before the function returns.
+func (s *script) runHooks(err error) error {
+	sort.SliceStable(s.hooks, func(i, j int) bool {
+		return s.hooks[i].level < s.hooks[j].level
+	})
+	var actionErrors []error
+	for _, hook := range s.hooks {
+		if hook.level > s.hookLevel {
+			continue
+		}
+		if actionErr := hook.action(err); actionErr != nil {
+			actionErrors = append(actionErrors, fmt.Errorf("runHooks: error running hook: %w", actionErr))
+		}
+	}
+	if len(actionErrors) != 0 {
+		return join(actionErrors...)
+	}
+	return nil
+}
````
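To see how the level threshold interacts with registration order, here is a self-contained sketch of the same mechanism. The type and constant names mirror hooks.go above; the `threshold` value stands in for the parsed `NOTIFICATION_LEVEL`:

```go
package main

import (
	"fmt"
	"sort"
)

type hookLevel int

const (
	hookLevelPlumbing hookLevel = iota // cleanup hooks always run
	hookLevelError
	hookLevelInfo
)

type hook struct {
	level  hookLevel
	action func(err error) error
}

func main() {
	threshold := hookLevelError // e.g. NOTIFICATION_LEVEL=error

	hooks := []hook{
		{hookLevelInfo, func(error) error { fmt.Println("info hook"); return nil }},
		{hookLevelError, func(err error) error { fmt.Println("error hook:", err); return nil }},
		{hookLevelPlumbing, func(error) error { fmt.Println("cleanup"); return nil }},
	}

	// Stable sort puts plumbing first, then error, then info hooks.
	sort.SliceStable(hooks, func(i, j int) bool { return hooks[i].level < hooks[j].level })
	for _, h := range hooks {
		if h.level > threshold {
			continue // info hooks are skipped when the level is "error"
		}
		h.action(fmt.Errorf("backup failed"))
	}
	// Output: cleanup, then "error hook: backup failed"; the info hook never runs.
}
```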
cmd/backup/main.go

````diff
@@ -1,37 +1,10 @@
-// Copyright 2021 - Offen Authors <hioffen@posteo.de>
+// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0
 
 package main
 
 import (
-	"bytes"
-	"context"
-	"errors"
-	"fmt"
-	"io"
 	"os"
-	"path"
-	"path/filepath"
-	"sort"
-	"strings"
-	"time"
-
-	"github.com/containrrr/shoutrrr"
-	"github.com/containrrr/shoutrrr/pkg/router"
-	sTypes "github.com/containrrr/shoutrrr/pkg/types"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/api/types/swarm"
-	"github.com/docker/docker/client"
-	"github.com/gofrs/flock"
-	"github.com/kelseyhightower/envconfig"
-	"github.com/leekchan/timeutil"
-	"github.com/m90/targz"
-	"github.com/minio/minio-go/v7"
-	"github.com/minio/minio-go/v7/pkg/credentials"
-	"github.com/otiai10/copy"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/crypto/openpgp"
 )
 
 func main() {
@@ -80,752 +53,5 @@ func main() {
 
 	s.must(s.encryptBackup())
 	s.must(s.copyBackup())
-	s.must(s.pruneOldBackups())
+	s.must(s.pruneBackups())
 }
-
-// script holds all the stateful information required to orchestrate a
-// single backup run.
-type script struct {
-	cli       *client.Client
-	mc        *minio.Client
-	logger    *logrus.Logger
-	sender    *router.ServiceRouter
-	hooks     []hook
-	hookLevel hookLevel
-
-	start  time.Time
-	file   string
-	output *bytes.Buffer
-
-	c *config
-}
-
-type config struct {
-	BackupSources              string        `split_words:"true" default:"/backup"`
-	BackupFilename             string        `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
-	BackupFilenameExpand       bool          `split_words:"true"`
-	BackupLatestSymlink        string        `split_words:"true"`
-	BackupArchive              string        `split_words:"true" default:"/archive"`
-	BackupRetentionDays        int32         `split_words:"true" default:"-1"`
-	BackupPruningLeeway        time.Duration `split_words:"true" default:"1m"`
-	BackupPruningPrefix        string        `split_words:"true"`
-	BackupStopContainerLabel   string        `split_words:"true" default:"true"`
-	BackupFromSnapshot         bool          `split_words:"true"`
-	AwsS3BucketName            string        `split_words:"true"`
-	AwsEndpoint                string        `split_words:"true" default:"s3.amazonaws.com"`
-	AwsEndpointProto           string        `split_words:"true" default:"https"`
-	AwsEndpointInsecure        bool          `split_words:"true"`
-	AwsAccessKeyID             string        `envconfig:"AWS_ACCESS_KEY_ID"`
-	AwsSecretAccessKey         string        `split_words:"true"`
-	AwsIamRoleEndpoint         string        `split_words:"true"`
-	GpgPassphrase              string        `split_words:"true"`
-	NotificationURLs           []string      `envconfig:"NOTIFICATION_URLS"`
-	NotificationLevel          string        `split_words:"true" default:"error"`
-	EmailNotificationRecipient string        `split_words:"true"`
-	EmailNotificationSender    string        `split_words:"true" default:"noreply@nohost"`
-	EmailSMTPHost              string        `envconfig:"EMAIL_SMTP_HOST"`
-	EmailSMTPPort              int           `envconfig:"EMAIL_SMTP_PORT" default:"587"`
-	EmailSMTPUsername          string        `envconfig:"EMAIL_SMTP_USERNAME"`
-	EmailSMTPPassword          string        `envconfig:"EMAIL_SMTP_PASSWORD"`
-}
-
-var msgBackupFailed = "backup run failed"
-
-// newScript creates all resources needed for the script to perform actions against
-// remote resources like the Docker engine or remote storage locations. All
-// reading from env vars or other configuration sources is expected to happen
-// in this method.
-func newScript() (*script, error) {
-	stdOut, logBuffer := buffer(os.Stdout)
-	s := &script{
-		c: &config{},
-		logger: &logrus.Logger{
-			Out:       stdOut,
-			Formatter: new(logrus.TextFormatter),
-			Hooks:     make(logrus.LevelHooks),
-			Level:     logrus.InfoLevel,
-		},
-		start:  time.Now(),
-		output: logBuffer,
-	}
-
-	if err := envconfig.Process("", s.c); err != nil {
-		return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
-	}
-
-	s.file = path.Join("/tmp", s.c.BackupFilename)
-	if s.c.BackupFilenameExpand {
-		s.file = os.ExpandEnv(s.file)
-		s.c.BackupLatestSymlink = os.ExpandEnv(s.c.BackupLatestSymlink)
-	}
-	s.file = timeutil.Strftime(&s.start, s.file)
-
-	_, err := os.Stat("/var/run/docker.sock")
-	if !os.IsNotExist(err) {
-		cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
-		if err != nil {
-			return nil, fmt.Errorf("newScript: failed to create docker client")
-		}
-		s.cli = cli
-	}
-
-	if s.c.AwsS3BucketName != "" {
-		var creds *credentials.Credentials
-		if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" {
-			creds = credentials.NewStaticV4(
-				s.c.AwsAccessKeyID,
-				s.c.AwsSecretAccessKey,
-				"",
-			)
-		} else if s.c.AwsIamRoleEndpoint != "" {
-			creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint)
-		} else {
-			return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
-		}
-
-		options := minio.Options{
-			Creds:  creds,
-			Secure: s.c.AwsEndpointProto == "https",
-		}
-
-		if s.c.AwsEndpointInsecure {
-			if !options.Secure {
-				return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
-			}
-
-			transport, err := minio.DefaultTransport(true)
-			if err != nil {
-				return nil, fmt.Errorf("newScript: failed to create default minio transport")
-			}
-			transport.TLSClientConfig.InsecureSkipVerify = true
-			options.Transport = transport
-		}
-
-		mc, err := minio.New(s.c.AwsEndpoint, &options)
-		if err != nil {
-			return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
-		}
-		s.mc = mc
-	}
-
-	if s.c.EmailNotificationRecipient != "" {
-		emailURL := fmt.Sprintf(
-			"smtp://%s:%s@%s:%d/?from=%s&to=%s",
-			s.c.EmailSMTPUsername,
-			s.c.EmailSMTPPassword,
-			s.c.EmailSMTPHost,
-			s.c.EmailSMTPPort,
-			s.c.EmailNotificationSender,
-			s.c.EmailNotificationRecipient,
-		)
-		s.c.NotificationURLs = append(s.c.NotificationURLs, emailURL)
-		s.logger.Warn(
-			"Using EMAIL_* keys for providing notification configuration has been deprecated and will be removed in the next major version.",
-		)
-		s.logger.Warn(
-			"Please use NOTIFICATION_URLS instead. Refer to the README for an upgrade guide.",
-		)
-	}
-
-	hookLevel, ok := hookLevels[s.c.NotificationLevel]
-	if !ok {
-		return nil, fmt.Errorf("newScript: unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel)
-	}
-	s.hookLevel = hookLevel
-
-	if len(s.c.NotificationURLs) > 0 {
-		sender, senderErr := shoutrrr.CreateSender(s.c.NotificationURLs...)
-		if senderErr != nil {
-			return nil, fmt.Errorf("newScript: error creating sender: %w", senderErr)
-		}
-		s.sender = sender
-		// To prevent duplicate notifications, ensure the regsistered callbacks
-		// run mutually exclusive.
-		s.registerHook(hookLevelError, func(err error) error {
-			if err == nil {
-				return nil
-			}
-			return s.notifyFailure(err)
-		})
-		s.registerHook(hookLevelInfo, func(err error) error {
-			if err != nil {
-				return nil
-			}
-			return s.notifySuccess()
-		})
-	}
-
-	return s, nil
-}
-
-var noop = func() error { return nil }
-
-// registerHook adds the given action at the given level.
-func (s *script) registerHook(level hookLevel, action func(err error) error) {
-	s.hooks = append(s.hooks, hook{level, action})
-}
-
-// notifyFailure sends a notification about a failed backup run
-func (s *script) notifyFailure(err error) error {
-	body := fmt.Sprintf(
-		"Running docker-volume-backup failed with error: %s\n\nLog output of the failed run was:\n\n%s\n", err, s.output.String(),
-	)
-	title := fmt.Sprintf("Failure running docker-volume-backup at %s", s.start.Format(time.RFC3339))
-	if err := s.sendNotification(title, body); err != nil {
-		return fmt.Errorf("notifyFailure: error notifying: %w", err)
-	}
-	return nil
-}
-
-// notifyFailure sends a notification about a successful backup run
-func (s *script) notifySuccess() error {
-	title := fmt.Sprintf("Success running docker-volume-backup at %s", s.start.Format(time.RFC3339))
-	body := fmt.Sprintf(
-		"Running docker-volume-backup succeeded.\n\nLog output was:\n\n%s\n", s.output.String(),
-	)
-	if err := s.sendNotification(title, body); err != nil {
-		return fmt.Errorf("notifySuccess: error notifying: %w", err)
-	}
-	return nil
-}
-
-// sendNotification sends a notification to all configured third party services
-func (s *script) sendNotification(title, body string) error {
-	var errs []error
-	for _, result := range s.sender.Send(body, &sTypes.Params{"title": title}) {
-		if result != nil {
-			errs = append(errs, result)
-		}
-	}
-	if len(errs) != 0 {
-		return fmt.Errorf("sendNotification: error sending message: %w", join(errs...))
-	}
-	return nil
-}
-
-// stopContainers stops all Docker containers that are marked as to being
-// stopped during the backup and returns a function that can be called to
-// restart everything that has been stopped.
-func (s *script) stopContainers() (func() error, error) {
-	if s.cli == nil {
-		return noop, nil
-	}
-
-	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
-		Quiet: true,
-	})
-	if err != nil {
-		return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
-	}
-
-	containerLabel := fmt.Sprintf(
-		"docker-volume-backup.stop-during-backup=%s",
-		s.c.BackupStopContainerLabel,
-	)
-	containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
-		Quiet: true,
-		Filters: filters.NewArgs(filters.KeyValuePair{
-			Key:   "label",
-			Value: containerLabel,
-		}),
-	})
-
-	if err != nil {
-		return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
-	}
-
-	if len(containersToStop) == 0 {
-		return noop, nil
-	}
-
-	s.logger.Infof(
-		"Stopping %d container(s) labeled `%s` out of %d running container(s).",
-		len(containersToStop),
-		containerLabel,
-		len(allContainers),
-	)
-
-	var stoppedContainers []types.Container
-	var stopErrors []error
-	for _, container := range containersToStop {
-		if err := s.cli.ContainerStop(context.Background(), container.ID, nil); err != nil {
-			stopErrors = append(stopErrors, err)
-		} else {
-			stoppedContainers = append(stoppedContainers, container)
-		}
-	}
-
-	var stopError error
-	if len(stopErrors) != 0 {
-		stopError = fmt.Errorf(
-			"stopContainersAndRun: %d error(s) stopping containers: %w",
-			len(stopErrors),
-			join(stopErrors...),
-		)
-	}
-
-	return func() error {
-		servicesRequiringUpdate := map[string]struct{}{}
-
-		var restartErrors []error
-		for _, container := range stoppedContainers {
-			if swarmServiceName, ok := container.Labels["com.docker.swarm.service.name"]; ok {
-				servicesRequiringUpdate[swarmServiceName] = struct{}{}
-				continue
-			}
-			if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
-				restartErrors = append(restartErrors, err)
-			}
-		}
-
-		if len(servicesRequiringUpdate) != 0 {
-			services, _ := s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
-			for serviceName := range servicesRequiringUpdate {
-				var serviceMatch swarm.Service
-				for _, service := range services {
-					if service.Spec.Name == serviceName {
-						serviceMatch = service
-						break
-					}
-				}
-				if serviceMatch.ID == "" {
-					return fmt.Errorf("stopContainersAndRun: couldn't find service with name %s", serviceName)
-				}
-				serviceMatch.Spec.TaskTemplate.ForceUpdate = 1
-				if _, err := s.cli.ServiceUpdate(
-					context.Background(), serviceMatch.ID,
-					serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
-				); err != nil {
-					restartErrors = append(restartErrors, err)
-				}
-			}
-		}
-
-		if len(restartErrors) != 0 {
-			return fmt.Errorf(
-				"stopContainersAndRun: %d error(s) restarting containers and services: %w",
-				len(restartErrors),
-				join(restartErrors...),
-			)
-		}
-		s.logger.Infof(
-			"Restarted %d container(s) and the matching service(s).",
-			len(stoppedContainers),
-		)
-		return nil
-	}, stopError
-}
-
-// takeBackup creates a tar archive of the configured backup location and
-// saves it to disk.
-func (s *script) takeBackup() error {
-	backupSources := s.c.BackupSources
-
-	if s.c.BackupFromSnapshot {
-		backupSources = filepath.Join("/tmp", s.c.BackupSources)
-		// copy before compressing guard against a situation where backup folder's content are still growing.
-		s.registerHook(hookLevelPlumbing, func(error) error {
-			if err := remove(backupSources); err != nil {
-				return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
-			}
-			s.logger.Infof("Removed snapshot `%s`.", backupSources)
-			return nil
-		})
-		if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
-			PreserveTimes: true,
-			PreserveOwner: true,
-		}); err != nil {
-			return fmt.Errorf("takeBackup: error creating snapshot: %w", err)
-		}
-		s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
-	}
-
-	tarFile := s.file
-	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := remove(tarFile); err != nil {
-			return fmt.Errorf("takeBackup: error removing tar file: %w", err)
-		}
-		s.logger.Infof("Removed tar file `%s`.", tarFile)
-		return nil
-	})
-	if err := targz.Compress(backupSources, tarFile); err != nil {
-		return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
-	}
-
-	s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
-	return nil
-}
-
-// encryptBackup encrypts the backup file using PGP and the configured passphrase.
-// In case no passphrase is given it returns early, leaving the backup file
-// untouched.
-func (s *script) encryptBackup() error {
-	if s.c.GpgPassphrase == "" {
-		return nil
-	}
-
-	gpgFile := fmt.Sprintf("%s.gpg", s.file)
-	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := remove(gpgFile); err != nil {
-			return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
-		}
-		s.logger.Infof("Removed GPG file `%s`.", gpgFile)
-		return nil
-	})
-
-	outFile, err := os.Create(gpgFile)
-	defer outFile.Close()
-	if err != nil {
-		return fmt.Errorf("encryptBackup: error opening out file: %w", err)
-	}
-
-	_, name := path.Split(s.file)
-	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
-		IsBinary: true,
-		FileName: name,
-	}, nil)
-	defer dst.Close()
-	if err != nil {
-		return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
-	}
-
-	src, err := os.Open(s.file)
-	if err != nil {
-		return fmt.Errorf("encryptBackup: error opening backup file `%s`: %w", s.file, err)
-	}
-
-	if _, err := io.Copy(dst, src); err != nil {
-		return fmt.Errorf("encryptBackup: error writing ciphertext to file: %w", err)
-	}
-
-	s.file = gpgFile
-	s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
-	return nil
-}
-
-// copyBackup makes sure the backup file is copied to both local and remote locations
-// as per the given configuration.
-func (s *script) copyBackup() error {
-	_, name := path.Split(s.file)
-	if s.mc != nil {
-		if _, err := s.mc.FPutObject(context.Background(), s.c.AwsS3BucketName, name, s.file, minio.PutObjectOptions{
-			ContentType: "application/tar+gzip",
-		}); err != nil {
-			return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
-		}
-		s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName)
-	}
-
-	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
-		if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil {
-			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
-		}
-		s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive)
-		if s.c.BackupLatestSymlink != "" {
-			symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink)
-			if _, err := os.Lstat(symlink); err == nil {
-				os.Remove(symlink)
-			}
-			if err := os.Symlink(name, symlink); err != nil {
-				return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
-			}
-			s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink)
-		}
-	}
-	return nil
-}
-
-// pruneOldBackups rotates away backups from local and remote storages using
-// the given configuration. In case the given configuration would delete all
-// backups, it does nothing instead.
-func (s *script) pruneOldBackups() error {
-	if s.c.BackupRetentionDays < 0 {
-		return nil
-	}
-
-	if s.c.BackupPruningLeeway != 0 {
-		s.logger.Infof("Sleeping for %s before pruning backups.", s.c.BackupPruningLeeway)
-		time.Sleep(s.c.BackupPruningLeeway)
-	}
-
-	deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays))
-
-	if s.mc != nil {
-		candidates := s.mc.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{
-			WithMetadata: true,
-			Prefix:       s.c.BackupPruningPrefix,
-		})
-
-		var matches []minio.ObjectInfo
-		var lenCandidates int
-		for candidate := range candidates {
-			lenCandidates++
-			if candidate.Err != nil {
-				return fmt.Errorf(
-					"pruneOldBackups: error looking up candidates from remote storage: %w",
-					candidate.Err,
-				)
-			}
-			if candidate.LastModified.Before(deadline) {
-				matches = append(matches, candidate)
-			}
-		}
-
-		if len(matches) != 0 && len(matches) != lenCandidates {
-			objectsCh := make(chan minio.ObjectInfo)
-			go func() {
-				for _, match := range matches {
-					objectsCh <- match
-				}
-				close(objectsCh)
-			}()
-			errChan := s.mc.RemoveObjects(context.Background(), s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{})
-			var removeErrors []error
-			for result := range errChan {
-				if result.Err != nil {
-					removeErrors = append(removeErrors, result.Err)
-				}
-			}
-
-			if len(removeErrors) != 0 {
-				return fmt.Errorf(
-					"pruneOldBackups: %d error(s) removing files from remote storage: %w",
-					len(removeErrors),
-					join(removeErrors...),
-				)
-			}
-			s.logger.Infof(
-				"Pruned %d out of %d remote backup(s) as their age exceeded the configured retention period of %d days.",
-				len(matches),
-				lenCandidates,
-				s.c.BackupRetentionDays,
-			)
-		} else if len(matches) != 0 && len(matches) == lenCandidates {
-			s.logger.Warnf(
-				"The current configuration would delete all %d remote backup copies.",
-				len(matches),
-			)
-			s.logger.Warn("Refusing to do so, please check your configuration.")
-		} else {
-			s.logger.Infof("None of %d remote backup(s) were pruned.", lenCandidates)
-		}
-	}
-
-	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
-		globPattern := path.Join(
-			s.c.BackupArchive,
-			fmt.Sprintf("%s*", s.c.BackupPruningPrefix),
-		)
-		globMatches, err := filepath.Glob(globPattern)
-		if err != nil {
-			return fmt.Errorf(
-				"pruneOldBackups: error looking up matching files using pattern %s: %w",
-				globPattern,
-				err,
-			)
-		}
-
-		var candidates []string
-		for _, candidate := range globMatches {
-			fi, err := os.Lstat(candidate)
-			if err != nil {
-				return fmt.Errorf(
-					"pruneOldBackups: error calling Lstat on file %s: %w",
-					candidate,
-					err,
-				)
-			}
-
-			if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
-				candidates = append(candidates, candidate)
-			}
-		}
-
-		var matches []string
-		for _, candidate := range candidates {
-			fi, err := os.Stat(candidate)
-			if err != nil {
-				return fmt.Errorf(
-					"pruneOldBackups: error calling stat on file %s: %w",
-					candidate,
-					err,
-				)
-			}
-			if fi.ModTime().Before(deadline) {
-				matches = append(matches, candidate)
-			}
-		}
-
-		if len(matches) != 0 && len(matches) != len(candidates) {
-			var removeErrors []error
-			for _, match := range matches {
-				if err := os.Remove(match); err != nil {
-					removeErrors = append(removeErrors, err)
-				}
-			}
-			if len(removeErrors) != 0 {
-				return fmt.Errorf(
-					"pruneOldBackups: %d error(s) deleting local files, starting with: %w",
-					len(removeErrors),
-					join(removeErrors...),
-				)
-			}
-			s.logger.Infof(
-				"Pruned %d out of %d local backup(s) as their age exceeded the configured retention period of %d days.",
-				len(matches),
-				len(candidates),
-				s.c.BackupRetentionDays,
-			)
-		} else if len(matches) != 0 && len(matches) == len(candidates) {
-			s.logger.Warnf(
-				"The current configuration would delete all %d local backup copies.",
-				len(matches),
-			)
-			s.logger.Warn("Refusing to do so, please check your configuration.")
-		} else {
-			s.logger.Infof("None of %d local backup(s) were pruned.", len(candidates))
-		}
-	}
-	return nil
-}
-
-// runHooks runs all hooks that have been registered using the
-// given levels in the defined ordering. In case executing a hook returns an
-// error, the following hooks will still be run before the function returns.
-func (s *script) runHooks(err error) error {
-	sort.SliceStable(s.hooks, func(i, j int) bool {
-		return s.hooks[i].level < s.hooks[j].level
-	})
-	var actionErrors []error
-	for _, hook := range s.hooks {
-		if hook.level > s.hookLevel {
-			continue
-		}
-		if actionErr := hook.action(err); actionErr != nil {
-			actionErrors = append(actionErrors, fmt.Errorf("runHooks: error running hook: %w", actionErr))
-		}
-	}
-	if len(actionErrors) != 0 {
-		return join(actionErrors...)
-	}
-	return nil
-}
-
-// must exits the script run prematurely in case the given error
-// is non-nil.
-func (s *script) must(err error) {
-	if err != nil {
-		s.logger.Errorf("Fatal error running backup: %s", err)
-		panic(err)
-	}
-}
-
-// remove removes the given file or directory from disk.
-func remove(location string) error {
-	fi, err := os.Lstat(location)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil
-		}
-		return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
-	}
-	if fi.IsDir() {
-		err = os.RemoveAll(location)
-	} else {
-		err = os.Remove(location)
-	}
-	if err != nil {
-		return fmt.Errorf("remove: error removing `%s`: %w", location, err)
-	}
-	return nil
-}
-
-// lock opens a lockfile at the given location, keeping it locked until the
-// caller invokes the returned release func. When invoked while the file is
-// still locked the function panics.
-func lock(lockfile string) func() error {
-	fileLock := flock.New(lockfile)
-	acquired, err := fileLock.TryLock()
-	if err != nil {
-		panic(err)
-	}
-	if !acquired {
-		panic("unable to acquire file lock")
-	}
-	return fileLock.Unlock
-}
-
-// copy creates a copy of the file located at `dst` at `src`.
-func copyFile(src, dst string) error {
-	in, err := os.Open(src)
-	if err != nil {
-		return err
-	}
-	defer in.Close()
-
-	out, err := os.Create(dst)
-	if err != nil {
-		return err
-	}
-
-	_, err = io.Copy(out, in)
-	if err != nil {
-		out.Close()
-		return err
-	}
-	return out.Close()
-}
-
-// join takes a list of errors and joins them into a single error
-func join(errs ...error) error {
-	if len(errs) == 1 {
-		return errs[0]
-	}
-	var msgs []string
-	for _, err := range errs {
-		if err == nil {
-			continue
-		}
-		msgs = append(msgs, err.Error())
-	}
-	return errors.New("[" + strings.Join(msgs, ", ") + "]")
-}
-
-// buffer takes an io.Writer and returns a wrapped version of the
-// writer that writes to both the original target as well as the returned buffer
-func buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
-	buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
-	return buffering, &buffering.buf
-}
-
-type bufferingWriter struct {
-	buf    bytes.Buffer
-	writer io.Writer
-}
-
-func (b *bufferingWriter) Write(p []byte) (n int, err error) {
-	if n, err := b.buf.Write(p); err != nil {
-		return n, fmt.Errorf("bufferingWriter: error writing to buffer: %w", err)
-	}
-	return b.writer.Write(p)
-}
-
-// hook contains a queued action that can be trigger them when the script
-// reaches a certain point (e.g. unsuccessful backup)
-type hook struct {
-	level  hookLevel
-	action func(err error) error
-}
-
-type hookLevel int
-
-const (
-	hookLevelPlumbing hookLevel = iota
-	hookLevelError
-	hookLevelInfo
-)
-
-var hookLevels = map[string]hookLevel{
-	"info":  hookLevelInfo,
-	"error": hookLevelError,
-}
````
cmd/backup/notifications.go (new file, 105 lines)

````diff
@@ -0,0 +1,105 @@
+// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"bytes"
+	_ "embed"
+	"fmt"
+	"text/template"
+	"time"
+
+	sTypes "github.com/containrrr/shoutrrr/pkg/types"
+)
+
+//go:embed notifications.tmpl
+var defaultNotifications string
+
+// NotificationData data to be passed to the notification templates
+type NotificationData struct {
+	Error  error
+	Config *Config
+	Stats  *Stats
+}
+
+// notify sends a notification using the given title and body templates.
+// Automatically creates notification data, adding the given error
+func (s *script) notify(titleTemplate string, bodyTemplate string, err error) error {
+	params := NotificationData{
+		Error:  err,
+		Stats:  s.stats,
+		Config: s.c,
+	}
+
+	titleBuf := &bytes.Buffer{}
+	if err := s.template.ExecuteTemplate(titleBuf, titleTemplate, params); err != nil {
+		return fmt.Errorf("notifyFailure: error executing %s template: %w", titleTemplate, err)
+	}
+
+	bodyBuf := &bytes.Buffer{}
+	if err := s.template.ExecuteTemplate(bodyBuf, bodyTemplate, params); err != nil {
+		return fmt.Errorf("notifyFailure: error executing %s template: %w", bodyTemplate, err)
+	}
+
+	if err := s.sendNotification(titleBuf.String(), bodyBuf.String()); err != nil {
+		return fmt.Errorf("notifyFailure: error notifying: %w", err)
+	}
+	return nil
+}
+
+// notifyFailure sends a notification about a failed backup run
+func (s *script) notifyFailure(err error) error {
+	return s.notify("title_failure", "body_failure", err)
+}
+
+// notifySuccess sends a notification about a successful backup run
+func (s *script) notifySuccess() error {
+	return s.notify("title_success", "body_success", nil)
+}
+
+// sendNotification sends a notification to all configured third party services
+func (s *script) sendNotification(title, body string) error {
+	var errs []error
+	for _, result := range s.sender.Send(body, &sTypes.Params{"title": title}) {
+		if result != nil {
+			errs = append(errs, result)
+		}
+	}
+	if len(errs) != 0 {
+		return fmt.Errorf("sendNotification: error sending message: %w", join(errs...))
+	}
+	return nil
+}
+
+var templateHelpers = template.FuncMap{
+	"formatTime": func(t time.Time) string {
+		return t.Format(time.RFC3339)
+	},
+	"formatBytesDec": func(bytes uint64) string {
+		return formatBytes(bytes, true)
+	},
+	"formatBytesBin": func(bytes uint64) string {
+		return formatBytes(bytes, false)
+	},
+}
+
+// formatBytes converts an amount of bytes in a human-readable representation
+// the decimal parameter specifies if using powers of 1000 (decimal) or powers of 1024 (binary)
+func formatBytes(b uint64, decimal bool) string {
+	unit := uint64(1024)
+	format := "%.1f %ciB"
+	if decimal {
+		unit = uint64(1000)
+		format = "%.1f %cB"
+	}
+	if b < unit {
+		return fmt.Sprintf("%d B", b)
+	}
+	div, exp := unit, 0
+	for n := b / unit; n >= unit; n /= unit {
+		div *= unit
+		exp++
+	}
+	return fmt.Sprintf(format, float64(b)/float64(div), "kMGTPE"[exp])
+}
````
26
cmd/backup/notifications.tmpl
Normal file
26
cmd/backup/notifications.tmpl
Normal file
@@ -0,0 +1,26 @@
{{ define "title_failure" -}}
Failure running docker-volume-backup at {{ .Stats.StartTime | formatTime }}
{{- end }}

{{ define "body_failure" -}}
Running docker-volume-backup failed with error: {{ .Error }}

Log output of the failed run was:

{{ .Stats.LogOutput }}
{{- end }}

{{ define "title_success" -}}
Success running docker-volume-backup at {{ .Stats.StartTime | formatTime }}
{{- end }}

{{ define "body_success" -}}
Running docker-volume-backup succeeded.

Log output was:

{{ .Stats.LogOutput }}
{{- end }}
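These embedded defaults can be overridden selectively: `newScript` (in `cmd/backup/script.go` below) parses any files mounted at `/etc/dockervolumebackup/notifications.d/` after the defaults, and in Go's `text/template` a later non-empty `define` under an existing name replaces the earlier one. A minimal sketch of such an override file (the message wording is illustrative, not part of the repository):

```
{{ define "title_failure" -}}
[docker-volume-backup] run FAILED at {{ .Stats.StartTime | formatTime }}
{{- end }}
```

Only the redefined template changes; the other defaults above stay in effect.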
665
cmd/backup/script.go
Normal file
665
cmd/backup/script.go
Normal file
@@ -0,0 +1,665 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path"
	"path/filepath"
	"text/template"
	"time"

	"github.com/containrrr/shoutrrr"
	"github.com/containrrr/shoutrrr/pkg/router"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"github.com/kelseyhightower/envconfig"
	"github.com/leekchan/timeutil"
	"github.com/m90/targz"
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/otiai10/copy"
	"github.com/sirupsen/logrus"
	"github.com/studio-b12/gowebdav"
	"golang.org/x/crypto/openpgp"
)

// script holds all the stateful information required to orchestrate a
// single backup run.
type script struct {
	cli          *client.Client
	minioClient  *minio.Client
	webdavClient *gowebdav.Client
	logger       *logrus.Logger
	sender       *router.ServiceRouter
	template     *template.Template
	hooks        []hook
	hookLevel    hookLevel

	file  string
	stats *Stats

	c *Config
}

// newScript creates all resources needed for the script to perform actions against
// remote resources like the Docker engine or remote storage locations. All
// reading from env vars or other configuration sources is expected to happen
// in this method.
func newScript() (*script, error) {
	stdOut, logBuffer := buffer(os.Stdout)
	s := &script{
		c: &Config{},
		logger: &logrus.Logger{
			Out:       stdOut,
			Formatter: new(logrus.TextFormatter),
			Hooks:     make(logrus.LevelHooks),
			Level:     logrus.InfoLevel,
		},
		stats: &Stats{
			StartTime: time.Now(),
			LogOutput: logBuffer,
			Storages:  StoragesStats{},
		},
	}

	s.registerHook(hookLevelPlumbing, func(error) error {
		s.stats.EndTime = time.Now()
		s.stats.TookTime = s.stats.EndTime.Sub(s.stats.StartTime)
		return nil
	})

	if err := envconfig.Process("", s.c); err != nil {
		return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
	}

	s.file = path.Join("/tmp", s.c.BackupFilename)
	if s.c.BackupFilenameExpand {
		s.file = os.ExpandEnv(s.file)
		s.c.BackupLatestSymlink = os.ExpandEnv(s.c.BackupLatestSymlink)
		s.c.BackupPruningPrefix = os.ExpandEnv(s.c.BackupPruningPrefix)
	}
	s.file = timeutil.Strftime(&s.stats.StartTime, s.file)

	_, err := os.Stat("/var/run/docker.sock")
	if !os.IsNotExist(err) {
		cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
		if err != nil {
			return nil, fmt.Errorf("newScript: failed to create docker client: %w", err)
		}
		s.cli = cli
	}

	if s.c.AwsS3BucketName != "" {
		var creds *credentials.Credentials
		if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" {
			creds = credentials.NewStaticV4(
				s.c.AwsAccessKeyID,
				s.c.AwsSecretAccessKey,
				"",
			)
		} else if s.c.AwsIamRoleEndpoint != "" {
			creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint)
		} else {
			return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
		}

		options := minio.Options{
			Creds:  creds,
			Secure: s.c.AwsEndpointProto == "https",
		}

		if s.c.AwsEndpointInsecure {
			if !options.Secure {
				return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
			}

			transport, err := minio.DefaultTransport(true)
			if err != nil {
				return nil, fmt.Errorf("newScript: failed to create default minio transport: %w", err)
			}
			transport.TLSClientConfig.InsecureSkipVerify = true
			options.Transport = transport
		}

		mc, err := minio.New(s.c.AwsEndpoint, &options)
		if err != nil {
			return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
		}
		s.minioClient = mc
	}

	if s.c.WebdavUrl != "" {
		if s.c.WebdavUsername == "" || s.c.WebdavPassword == "" {
			return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
		}
		s.webdavClient = gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword)
	}

	if s.c.EmailNotificationRecipient != "" {
		emailURL := fmt.Sprintf(
			"smtp://%s:%s@%s:%d/?from=%s&to=%s",
			s.c.EmailSMTPUsername,
			s.c.EmailSMTPPassword,
			s.c.EmailSMTPHost,
			s.c.EmailSMTPPort,
			s.c.EmailNotificationSender,
			s.c.EmailNotificationRecipient,
		)
		s.c.NotificationURLs = append(s.c.NotificationURLs, emailURL)
		s.logger.Warn(
			"Using EMAIL_* keys for providing notification configuration has been deprecated and will be removed in the next major version.",
		)
		s.logger.Warn(
			"Please use NOTIFICATION_URLS instead. Refer to the README for an upgrade guide.",
		)
	}

	hookLevel, ok := hookLevels[s.c.NotificationLevel]
	if !ok {
		return nil, fmt.Errorf("newScript: unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel)
	}
	s.hookLevel = hookLevel

	if len(s.c.NotificationURLs) > 0 {
		sender, senderErr := shoutrrr.CreateSender(s.c.NotificationURLs...)
		if senderErr != nil {
			return nil, fmt.Errorf("newScript: error creating sender: %w", senderErr)
		}
		s.sender = sender

		tmpl := template.New("")
		tmpl.Funcs(templateHelpers)
		tmpl, err = tmpl.Parse(defaultNotifications)
		if err != nil {
			return nil, fmt.Errorf("newScript: unable to parse default notifications templates: %w", err)
		}

		if fi, err := os.Stat("/etc/dockervolumebackup/notifications.d"); err == nil && fi.IsDir() {
			tmpl, err = tmpl.ParseGlob("/etc/dockervolumebackup/notifications.d/*.*")
			if err != nil {
				return nil, fmt.Errorf("newScript: unable to parse user defined notifications templates: %w", err)
			}
		}
		s.template = tmpl

		// To prevent duplicate notifications, ensure the registered callbacks
		// run mutually exclusively.
		s.registerHook(hookLevelError, func(err error) error {
			if err == nil {
				return nil
			}
			return s.notifyFailure(err)
		})
		s.registerHook(hookLevelInfo, func(err error) error {
			if err != nil {
				return nil
			}
			return s.notifySuccess()
		})
	}

	return s, nil
}

// stopContainers stops all Docker containers that are marked to be stopped
// during the backup and returns a function that can be called to
// restart everything that has been stopped.
func (s *script) stopContainers() (func() error, error) {
	if s.cli == nil {
		return noop, nil
	}

	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
		Quiet: true,
	})
	if err != nil {
		return noop, fmt.Errorf("stopContainers: error querying for containers: %w", err)
	}

	containerLabel := fmt.Sprintf(
		"docker-volume-backup.stop-during-backup=%s",
		s.c.BackupStopContainerLabel,
	)
	containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
		Quiet: true,
		Filters: filters.NewArgs(filters.KeyValuePair{
			Key:   "label",
			Value: containerLabel,
		}),
	})
	if err != nil {
		return noop, fmt.Errorf("stopContainers: error querying for containers to stop: %w", err)
	}

	if len(containersToStop) == 0 {
		return noop, nil
	}

	s.logger.Infof(
		"Stopping %d container(s) labeled `%s` out of %d running container(s).",
		len(containersToStop),
		containerLabel,
		len(allContainers),
	)

	var stoppedContainers []types.Container
	var stopErrors []error
	for _, container := range containersToStop {
		if err := s.cli.ContainerStop(context.Background(), container.ID, nil); err != nil {
			stopErrors = append(stopErrors, err)
		} else {
			stoppedContainers = append(stoppedContainers, container)
		}
	}

	var stopError error
	if len(stopErrors) != 0 {
		stopError = fmt.Errorf(
			"stopContainers: %d error(s) stopping containers: %w",
			len(stopErrors),
			join(stopErrors...),
		)
	}

	s.stats.Containers = ContainersStats{
		All:     uint(len(allContainers)),
		ToStop:  uint(len(containersToStop)),
		Stopped: uint(len(stoppedContainers)),
	}

	return func() error {
		servicesRequiringUpdate := map[string]struct{}{}

		var restartErrors []error
		for _, container := range stoppedContainers {
			if swarmServiceName, ok := container.Labels["com.docker.swarm.service.name"]; ok {
				servicesRequiringUpdate[swarmServiceName] = struct{}{}
				continue
			}
			if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
				restartErrors = append(restartErrors, err)
			}
		}

		if len(servicesRequiringUpdate) != 0 {
			services, _ := s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
			for serviceName := range servicesRequiringUpdate {
				var serviceMatch swarm.Service
				for _, service := range services {
					if service.Spec.Name == serviceName {
						serviceMatch = service
						break
					}
				}
				if serviceMatch.ID == "" {
					return fmt.Errorf("stopContainers: couldn't find service with name %s", serviceName)
				}
				serviceMatch.Spec.TaskTemplate.ForceUpdate = 1
				if _, err := s.cli.ServiceUpdate(
					context.Background(), serviceMatch.ID,
					serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
				); err != nil {
					restartErrors = append(restartErrors, err)
				}
			}
		}

		if len(restartErrors) != 0 {
			return fmt.Errorf(
				"stopContainers: %d error(s) restarting containers and services: %w",
				len(restartErrors),
				join(restartErrors...),
			)
		}
		s.logger.Infof(
			"Restarted %d container(s) and the matching service(s).",
			len(stoppedContainers),
		)
		return nil
	}, stopError
}

// takeBackup creates a tar archive of the configured backup location and
// saves it to disk.
func (s *script) takeBackup() error {
	backupSources := s.c.BackupSources

	if s.c.BackupFromSnapshot {
		backupSources = filepath.Join("/tmp", s.c.BackupSources)
		// Copy before compressing to guard against a situation where the
		// backup folder's contents are still growing.
		s.registerHook(hookLevelPlumbing, func(error) error {
			if err := remove(backupSources); err != nil {
				return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
			}
			s.logger.Infof("Removed snapshot `%s`.", backupSources)
			return nil
		})
		if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
			PreserveTimes: true,
			PreserveOwner: true,
		}); err != nil {
			return fmt.Errorf("takeBackup: error creating snapshot: %w", err)
		}
		s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
	}

	tarFile := s.file
	s.registerHook(hookLevelPlumbing, func(error) error {
		if err := remove(tarFile); err != nil {
			return fmt.Errorf("takeBackup: error removing tar file: %w", err)
		}
		s.logger.Infof("Removed tar file `%s`.", tarFile)
		return nil
	})
	if err := targz.Compress(backupSources, tarFile); err != nil {
		return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
	}

	s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
	return nil
}

// encryptBackup encrypts the backup file using PGP and the configured passphrase.
// In case no passphrase is given it returns early, leaving the backup file
// untouched.
func (s *script) encryptBackup() error {
	if s.c.GpgPassphrase == "" {
		return nil
	}

	gpgFile := fmt.Sprintf("%s.gpg", s.file)
	s.registerHook(hookLevelPlumbing, func(error) error {
		if err := remove(gpgFile); err != nil {
			return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
		}
		s.logger.Infof("Removed GPG file `%s`.", gpgFile)
		return nil
	})

	outFile, err := os.Create(gpgFile)
	if err != nil {
		return fmt.Errorf("encryptBackup: error opening out file: %w", err)
	}
	defer outFile.Close()

	_, name := path.Split(s.file)
	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
		IsBinary: true,
		FileName: name,
	}, nil)
	if err != nil {
		return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err)
	}
	defer dst.Close()

	src, err := os.Open(s.file)
	if err != nil {
		return fmt.Errorf("encryptBackup: error opening backup file `%s`: %w", s.file, err)
	}
	defer src.Close()

	if _, err := io.Copy(dst, src); err != nil {
		return fmt.Errorf("encryptBackup: error writing ciphertext to file: %w", err)
	}

	s.file = gpgFile
	s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
	return nil
}

// copyBackup makes sure the backup file is copied to both local and remote locations
// as per the given configuration.
func (s *script) copyBackup() error {
	_, name := path.Split(s.file)
	if stat, err := os.Stat(s.file); err != nil {
		return fmt.Errorf("copyBackup: unable to stat backup file: %w", err)
	} else {
		size := stat.Size()
		s.stats.BackupFile = BackupFileStats{
			Size:     uint64(size),
			Name:     name,
			FullPath: s.file,
		}
	}

	if s.minioClient != nil {
		if _, err := s.minioClient.FPutObject(context.Background(), s.c.AwsS3BucketName, filepath.Join(s.c.AwsS3Path, name), s.file, minio.PutObjectOptions{
			ContentType: "application/tar+gzip",
		}); err != nil {
			return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
		}
		s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName)
	}

	if s.webdavClient != nil {
		bytes, err := os.ReadFile(s.file)
		if err != nil {
			return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
		}
		if err := s.webdavClient.MkdirAll(s.c.WebdavPath, 0644); err != nil {
			return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", s.c.WebdavPath, err)
		}
		if err := s.webdavClient.Write(filepath.Join(s.c.WebdavPath, name), bytes, 0644); err != nil {
			return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err)
		}
		s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath)
	}

	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
		if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil {
			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
		}
		s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive)
		if s.c.BackupLatestSymlink != "" {
			symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink)
			if _, err := os.Lstat(symlink); err == nil {
				os.Remove(symlink)
			}
			if err := os.Symlink(name, symlink); err != nil {
				return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
			}
			s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink)
		}
	}
	return nil
}

// pruneBackups rotates away backups from local and remote storages using
// the given configuration. In case the given configuration would delete all
// backups, it does nothing instead and logs a warning.
func (s *script) pruneBackups() error {
	if s.c.BackupRetentionDays < 0 {
		return nil
	}

	deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)

	// doPrune holds general control flow that applies to any kind of storage.
	// Callers can pass in a thunk that performs the actual deletion of files.
	var doPrune = func(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
		if lenMatches != 0 && lenMatches != lenCandidates {
			if err := doRemoveFiles(); err != nil {
				return err
			}
			s.logger.Infof(
				"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
				lenMatches,
				lenCandidates,
				description,
				s.c.BackupRetentionDays,
			)
		} else if lenMatches != 0 && lenMatches == lenCandidates {
			s.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description)
			s.logger.Warn("Refusing to do so, please check your configuration.")
		} else {
			s.logger.Infof("None of %d existing %s were pruned.", lenCandidates, description)
		}
		return nil
	}

	if s.minioClient != nil {
		candidates := s.minioClient.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{
			WithMetadata: true,
			Prefix:       s.c.BackupPruningPrefix,
		})

		var matches []minio.ObjectInfo
		var lenCandidates int
		for candidate := range candidates {
			lenCandidates++
			if candidate.Err != nil {
				return fmt.Errorf(
					"pruneBackups: error looking up candidates from remote storage: %w",
					candidate.Err,
				)
			}
			if candidate.LastModified.Before(deadline) {
				matches = append(matches, candidate)
			}
		}

		s.stats.Storages.S3 = StorageStats{
			Total:  uint(lenCandidates),
			Pruned: uint(len(matches)),
		}

		if err := doPrune(len(matches), lenCandidates, "remote backup(s)", func() error {
			objectsCh := make(chan minio.ObjectInfo)
			go func() {
				for _, match := range matches {
					objectsCh <- match
				}
				close(objectsCh)
			}()
			errChan := s.minioClient.RemoveObjects(context.Background(), s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{})
			var removeErrors []error
			for result := range errChan {
				if result.Err != nil {
					removeErrors = append(removeErrors, result.Err)
				}
			}
			if len(removeErrors) != 0 {
				return join(removeErrors...)
			}
			return nil
		}); err != nil {
			return err
		}
	}

	if s.webdavClient != nil {
		candidates, err := s.webdavClient.ReadDir(s.c.WebdavPath)
		if err != nil {
			return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err)
		}
		var matches []fs.FileInfo
		var lenCandidates int
		for _, candidate := range candidates {
			lenCandidates++
			if candidate.ModTime().Before(deadline) {
				matches = append(matches, candidate)
			}
		}

		s.stats.Storages.WebDAV = StorageStats{
			Total:  uint(lenCandidates),
			Pruned: uint(len(matches)),
		}

		if err := doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error {
			for _, match := range matches {
				if err := s.webdavClient.Remove(filepath.Join(s.c.WebdavPath, match.Name())); err != nil {
					return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err)
				}
			}
			return nil
		}); err != nil {
			return err
		}
	}

	if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) {
		globPattern := path.Join(
			s.c.BackupArchive,
			fmt.Sprintf("%s*", s.c.BackupPruningPrefix),
		)
		globMatches, err := filepath.Glob(globPattern)
		if err != nil {
			return fmt.Errorf(
				"pruneBackups: error looking up matching files using pattern %s: %w",
				globPattern,
				err,
			)
		}

		var candidates []string
		for _, candidate := range globMatches {
			fi, err := os.Lstat(candidate)
			if err != nil {
				return fmt.Errorf(
					"pruneBackups: error calling Lstat on file %s: %w",
					candidate,
					err,
				)
			}

			if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
				candidates = append(candidates, candidate)
			}
		}

		var matches []string
		for _, candidate := range candidates {
			fi, err := os.Stat(candidate)
			if err != nil {
				return fmt.Errorf(
					"pruneBackups: error calling stat on file %s: %w",
					candidate,
					err,
				)
			}
			if fi.ModTime().Before(deadline) {
				matches = append(matches, candidate)
			}
		}

		s.stats.Storages.Local = StorageStats{
			Total:  uint(len(candidates)),
			Pruned: uint(len(matches)),
		}

		if err := doPrune(len(matches), len(candidates), "local backup(s)", func() error {
			var removeErrors []error
			for _, match := range matches {
				if err := os.Remove(match); err != nil {
					removeErrors = append(removeErrors, err)
				}
			}
			if len(removeErrors) != 0 {
				return fmt.Errorf(
					"pruneBackups: %d error(s) deleting local files, starting with: %w",
					len(removeErrors),
					join(removeErrors...),
				)
			}
			return nil
		}); err != nil {
			return err
		}
	}
	return nil
}

// must exits the script run prematurely in case the given error
// is non-nil.
func (s *script) must(err error) {
	if err != nil {
		s.logger.Errorf("Fatal error running backup: %s", err)
		panic(err)
	}
}
49
cmd/backup/stats.go
Normal file
49
cmd/backup/stats.go
Normal file
@@ -0,0 +1,49 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"bytes"
	"time"
)

// ContainersStats holds stats about the Docker containers.
type ContainersStats struct {
	All        uint
	ToStop     uint
	Stopped    uint
	StopErrors uint
}

// BackupFileStats holds stats about the created backup file.
type BackupFileStats struct {
	Name     string
	FullPath string
	Size     uint64
}

// StorageStats holds stats about the status of an archival directory.
type StorageStats struct {
	Total       uint
	Pruned      uint
	PruneErrors uint
}

// StoragesStats holds stats about each possible archival location (Local, WebDAV, S3).
type StoragesStats struct {
	Local  StorageStats
	WebDAV StorageStats
	S3     StorageStats
}

// Stats holds global stats regarding script execution.
type Stats struct {
	StartTime  time.Time
	EndTime    time.Time
	TookTime   time.Duration
	LogOutput  *bytes.Buffer
	Containers ContainersStats
	BackupFile BackupFileStats
	Storages   StoragesStats
}
107
cmd/backup/util.go
Normal file
107
cmd/backup/util.go
Normal file
@@ -0,0 +1,107 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/gofrs/flock"
)

var noop = func() error { return nil }

// lock opens a lockfile at the given location, keeping it locked until the
// caller invokes the returned release func. When invoked while the file is
// still locked the function panics.
func lock(lockfile string) func() error {
	fileLock := flock.New(lockfile)
	acquired, err := fileLock.TryLock()
	if err != nil {
		panic(err)
	}
	if !acquired {
		panic("unable to acquire file lock")
	}
	return fileLock.Unlock
}

// copyFile creates a copy of the file located at `src` at `dst`.
func copyFile(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}

	_, err = io.Copy(out, in)
	if err != nil {
		out.Close()
		return err
	}
	return out.Close()
}

// join takes a list of errors and joins them into a single error.
func join(errs ...error) error {
	if len(errs) == 1 {
		return errs[0]
	}
	var msgs []string
	for _, err := range errs {
		if err == nil {
			continue
		}
		msgs = append(msgs, err.Error())
	}
	return errors.New("[" + strings.Join(msgs, ", ") + "]")
}

// remove removes the given file or directory from disk.
func remove(location string) error {
	fi, err := os.Lstat(location)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
	}
	if fi.IsDir() {
		err = os.RemoveAll(location)
	} else {
		err = os.Remove(location)
	}
	if err != nil {
		return fmt.Errorf("remove: error removing `%s`: %w", location, err)
	}
	return nil
}

// buffer takes an io.Writer and returns a wrapped version of the
// writer that writes to both the original target as well as the returned buffer.
func buffer(w io.Writer) (io.Writer, *bytes.Buffer) {
	buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w}
	return buffering, &buffering.buf
}

type bufferingWriter struct {
	buf    bytes.Buffer
	writer io.Writer
}

func (b *bufferingWriter) Write(p []byte) (n int, err error) {
	if n, err := b.buf.Write(p); err != nil {
		return n, fmt.Errorf("bufferingWriter: error writing to buffer: %w", err)
	}
	return b.writer.Write(p)
}
38
docs/NOTIFICATION-TEMPLATES.md
Normal file
38
docs/NOTIFICATION-TEMPLATES.md
Normal file
@@ -0,0 +1,38 @@
# Notification templates reference

In order to customize the title and body of notifications you'll have to write a [go template](https://pkg.go.dev/text/template) and mount it inside the `/etc/dockervolumebackup/notifications.d/` directory.

Configuration, data about the backup run, and helper functions are passed to this template; this page documents them in full.

## Data

Here is a list of all data passed to the template:

* `Config`: this object holds the configuration that has been passed to the script. The field names are the names of the recognized environment variables converted to PascalCase. (e.g. `BACKUP_STOP_CONTAINER_LABEL` becomes `BackupStopContainerLabel`)
* `Error`: the error that made the backup fail. Only available in the `title_failure` and `body_failure` templates
* `Stats`: object that holds stats regarding script execution. In case of an unsuccessful run, some information may not be available.
  * `StartTime`: time when the script started execution
  * `EndTime`: time when the backup completed successfully (after pruning)
  * `TookTime`: amount of time it took for the backup to run (equal to `EndTime - StartTime`)
  * `LogOutput`: full log of the application
  * `Containers`: object containing stats about the Docker containers
    * `All`: total number of containers
    * `ToStop`: number of containers matched by the stop rule
    * `Stopped`: number of containers successfully stopped
    * `StopErrors`: number of containers that could not be stopped (equal to `ToStop - Stopped`)
  * `BackupFile`: object containing information about the backup file
    * `Name`: name of the backup file (e.g. `backup-2022-02-11T01-00-00.tar.gz`)
    * `FullPath`: full path of the backup file (e.g. `/archive/backup-2022-02-11T01-00-00.tar.gz`)
    * `Size`: size in bytes of the backup file
  * `Storages`: object that holds stats about each storage
    * `Local`, `S3` or `WebDAV`:
      * `Total`: total number of backup files
      * `Pruned`: number of backup files that were deleted due to the pruning rules
      * `PruneErrors`: number of backup files that could not be pruned

A minimal custom template combining these fields is sketched below, after the list of helper functions.

## Functions

Some formatting functions are also available:

* `formatTime`: formats a time object using the [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339) format (e.g. `2022-02-11T01:00:00Z`)
* `formatBytesBin`: formats an amount of bytes using powers of 1024 (e.g. `7055258` bytes will be `6.7 MiB`)
* `formatBytesDec`: formats an amount of bytes using powers of 1000 (e.g. `7055258` bytes will be `7.1 MB`)
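As a concrete illustration, a custom template combining the data and helpers documented above might look like the following sketch (the file name `custom.tmpl` is arbitrary; any file mounted into `/etc/dockervolumebackup/notifications.d/` is picked up):

```
{{ define "title_success" -}}
Backup finished at {{ .Stats.EndTime | formatTime }}
{{- end }}

{{ define "body_success" -}}
Created {{ .Stats.BackupFile.Name }} ({{ .Stats.BackupFile.Size | formatBytesBin }})
in {{ .Stats.TookTime }}, stopping {{ .Stats.Containers.Stopped }} of {{ .Stats.Containers.All }} running container(s).
{{- end }}
```

Mounting it is a single volume entry on the backup container, e.g. `./custom.tmpl:/etc/dockervolumebackup/notifications.d/custom.tmpl`.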
3
go.mod
3
go.mod
@@ -8,10 +8,11 @@ require (
	github.com/gofrs/flock v0.8.1
	github.com/kelseyhightower/envconfig v1.4.0
	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
	github.com/m90/targz v0.0.0-20210904082215-2e9a4529a615
	github.com/m90/targz v0.0.0-20220208141135-d3baeef59a97
	github.com/minio/minio-go/v7 v7.0.16
	github.com/otiai10/copy v1.7.0
	github.com/sirupsen/logrus v1.8.1
	github.com/studio-b12/gowebdav v0.0.0-20211109083228-3f8721cd4b6f
	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
)
6
go.sum
6
go.sum
@@ -450,8 +450,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d h1:2puqoOQwi3Ai1oznMOsFIbifm6kIfJaLLyYzWD4IzTs=
github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d/go.mod h1:hO90vCP2x3exaSH58BIAowSKvV+0OsY21TtzuFGHON4=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/m90/targz v0.0.0-20210904082215-2e9a4529a615 h1:rn0LO2tQEgCDOct8qnbcslTUpAIWdVlWcGkjoumhf2U=
github.com/m90/targz v0.0.0-20220208141135-d3baeef59a97 h1:Uc/WzUKI/zvhkqIzk5TyaPE6AY1SD1DWGc7RV7cky4s=
github.com/m90/targz v0.0.0-20210904082215-2e9a4529a615/go.mod h1:YZK3bSO/oVlk9G+v00BxgzxW2Us4p/R4ysHOBjk0fJI=
github.com/m90/targz v0.0.0-20220208141135-d3baeef59a97/go.mod h1:YZK3bSO/oVlk9G+v00BxgzxW2Us4p/R4ysHOBjk0fJI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20190403194419-1ea4449da983/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -659,6 +659,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/studio-b12/gowebdav v0.0.0-20211109083228-3f8721cd4b6f h1:L2NE7BXnSlSLoNYZ0lCwZDjdnYjCNYC71k9ClZUTFTs=
github.com/studio-b12/gowebdav v0.0.0-20211109083228-3f8721cd4b6f/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -7,6 +7,9 @@ cd $(dirname $0)
docker network create test_network
docker volume create backup_data
docker volume create app_data
# This volume is created to test whether empty directories are handled
# correctly. It is not supposed to hold any data.
docker volume create empty_data

docker run -d \
  --name minio \
@@ -31,6 +34,7 @@ sleep 10
docker run --rm \
  --network test_network \
  -v app_data:/backup/app_data \
  -v empty_data:/backup/empty_data \
  -v /var/run/docker.sock:/var/run/docker.sock \
  --env AWS_ACCESS_KEY_ID=test \
  --env AWS_SECRET_ACCESS_KEY=GMusLtUmILge2by+z890kQ \
@@ -44,10 +48,12 @@ docker run --rm \

docker run --rm -it \
  -v backup_data:/data alpine \
  ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db'
  ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'

echo "[TEST:PASS] Found relevant files in untared backup."
echo "[TEST:PASS] Found relevant files in untared remote backup."

# This test does not stop containers during backup. This is happening on
# purpose in order to cover this setup as well.
if [ "$(docker ps -q | wc -l)" != "2" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker ps
@@ -10,13 +10,23 @@ services:
      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
    volumes:
      - backup_data:/data
      - minio_backup_data:/data

  webdav:
    image: bytemark/webdav:2.4
    environment:
      AUTH_TYPE: Digest
      USERNAME: test
      PASSWORD: test
    volumes:
      - webdav_backup_data:/var/lib/dav

  backup: &default_backup_service
    image: offen/docker-volume-backup:${TEST_VERSION}
    hostname: hostnametoken
    depends_on:
      - minio
      - webdav
    restart: always
    environment:
      AWS_ACCESS_KEY_ID: test
@@ -32,6 +42,10 @@ services:
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      GPG_PASSPHRASE: 1234secret
      WEBDAV_URL: http://webdav/
      WEBDAV_PATH: /my/new/path/
      WEBDAV_USERNAME: test
      WEBDAV_PASSWORD: test
    volumes:
      - ./local:/archive
      - app_data:/backup/app_data:ro
@@ -45,5 +59,6 @@ services:
      - app_data:/var/opt/offen

volumes:
  backup_data:
  minio_backup_data:
  webdav_backup_data:
  app_data:
@@ -9,30 +9,38 @@ mkdir -p local
docker-compose up -d
sleep 5

# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker-compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
docker-compose exec backup backup

sleep 5
if [ "$(docker-compose ps -q | wc -l)" != "4" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker-compose ps
  exit 1
fi
echo "[TEST:PASS] All containers running post backup."

docker run --rm -it \
  -v compose_backup_data:/data alpine \
  -v compose_minio_backup_data:/minio_data \
  -v compose_webdav_backup_data:/webdav_data alpine \
  ash -c 'apk add gnupg && echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /data/backup/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
  ash -c 'apk add gnupg && \
    echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /minio_data/backup/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db && \
    echo 1234secret | gpg -d --pinentry-mode loopback --passphrase-fd 0 --yes /webdav_data/data/my/new/path/test-hostnametoken.tar.gz.gpg > /tmp/test-hostnametoken.tar.gz && tar -xf /tmp/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'

echo "[TEST:PASS] Found relevant files in untared remote backup."
echo "[TEST:PASS] Found relevant files in decrypted and untared remote backups."

test -L ./local/test-hostnametoken.latest.tar.gz.gpg
echo 1234secret | gpg -d --yes --passphrase-fd 0 ./local/test-hostnametoken.tar.gz.gpg > ./local/decrypted.tar.gz
tar -xf ./local/decrypted.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
rm ./local/decrypted.tar.gz
test -L /tmp/backup/app_data/db.link

echo "[TEST:PASS] Found relevant files in untared local backup."
echo "[TEST:PASS] Found relevant files in decrypted and untared local backup."

if [ "$(docker-compose ps -q | wc -l)" != "3" ]; then
test -L ./local/test-hostnametoken.latest.tar.gz.gpg
echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
echo "[TEST:PASS] Found symlink to latest version in local backup."
docker-compose ps
exit 1
fi
echo "[TEST:PASS] All containers running post backup."

# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
@@ -43,16 +51,18 @@ sleep 5
docker-compose exec backup backup

docker run --rm -it \
  -v compose_backup_data:/data alpine \
  -v compose_minio_backup_data:/minio_data \
  -v compose_webdav_backup_data:/webdav_data alpine \
  ash -c '[ $(find /data/backup/ -type f | wc -l) = "1" ]'
  ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ] && \
    [ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'

echo "[TEST:PASS] Remote backups have not been deleted."

if [ "$(find ./local -type f | wc -l)" != "1" ]; then
  echo "[TEST:FAIL] Backups should not have been deleted, instead seen:"
  find ./local -type f
  exit 1
fi

echo "[TEST:PASS] Local backups have not been deleted."

docker-compose down --volumes
1
test/notifications/.gitignore
vendored
Normal file
1
test/notifications/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
local
36
test/notifications/docker-compose.yml
Normal file
36
test/notifications/docker-compose.yml
Normal file
@@ -0,0 +1,36 @@
version: '3'

services:
  backup: &default_backup_service
    image: offen/docker-volume-backup:${TEST_VERSION}
    restart: always
    environment:
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_PRUNING_PREFIX: test
      NOTIFICATION_LEVEL: info
      NOTIFICATION_URLS: ${NOTIFICATION_URLS}
    volumes:
      - ./local:/archive
      - app_data:/backup/app_data:ro
      - ./notifications.tmpl:/etc/dockervolumebackup/notifications.d/notifications.tmpl

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

  gotify:
    image: gotify/server
    ports:
      - 8080:80
    environment:
      - GOTIFY_DEFAULTUSER_PASS=custom
    volumes:
      - gotify_data:/app/data

volumes:
  app_data:
  gotify_data:
7
test/notifications/notifications.tmpl
Normal file
7
test/notifications/notifications.tmpl
Normal file
@@ -0,0 +1,7 @@
{{ define "title_success" -}}
Successful test run, yay!
{{- end }}

{{ define "body_success" -}}
Backing up {{ .Stats.BackupFile.FullPath }} succeeded.
{{- end }}
52
test/notifications/run.sh
Executable file
52
test/notifications/run.sh
Executable file
@@ -0,0 +1,52 @@
#!/bin/sh

set -e

cd $(dirname $0)

mkdir -p local

docker-compose up -d
sleep 5

GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')
echo "[TEST:INFO] Set up Gotify application using token $GOTIFY_TOKEN"

docker-compose exec backup backup

NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 0 ]; then
  echo "[TEST:FAIL] Expected no notifications to be sent when not configured"
  exit 1
fi
echo "[TEST:PASS] No notifications were sent when not configured."

docker-compose down

NOTIFICATION_URLS="gotify://gotify/${GOTIFY_TOKEN}?disableTLS=true" docker-compose up -d

docker-compose exec backup backup

NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
if [ "$NUM_MESSAGES" != 1 ]; then
  echo "[TEST:FAIL] Expected one notification to be sent when configured"
  exit 1
fi
echo "[TEST:PASS] Correct number of notifications were sent when configured."

MESSAGE_TITLE=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].title')
MESSAGE_BODY=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages[0].message')

if [ "$MESSAGE_TITLE" != "Successful test run, yay!" ]; then
  echo "[TEST:FAIL] Unexpected notification title $MESSAGE_TITLE"
  exit 1
fi
echo "[TEST:PASS] Custom notification title was used."

if [ "$MESSAGE_BODY" != "Backing up /tmp/test.tar.gz succeeded." ]; then
  echo "[TEST:FAIL] Unexpected notification body $MESSAGE_BODY"
  exit 1
fi
echo "[TEST:PASS] Custom notification body was used."

docker-compose down --volumes
@@ -23,14 +23,13 @@ docker run --rm -it \

echo "[TEST:PASS] Found relevant files in untared backup."

sleep 5
if [ "$(docker ps -q | wc -l)" != "5" ]; then
  echo "[TEST:FAIL] Expected all containers to be running post backup, instead seen:"
  docker ps -a
  exit 1
fi
echo "[TEST:PASS] All containers running post backup."

docker stack rm test_stack
docker swarm leave --force