Mirror of https://github.com/offen/docker-volume-backup.git (synced 2025-12-05 17:18:02 +01:00)

Compare commits: v2.41.0...validate-c (3 commits)

Commits in this comparison:
- d8aa6db3f5
- 8a64da4b0b
- f97ce11734
@@ -93,6 +93,8 @@ func compress(paths []string, outFilePath, algo string, concurrency int) error {

 func getCompressionWriter(file *os.File, algo string, concurrency int) (io.WriteCloser, error) {
 	switch algo {
+	case "none":
+		return &passThroughWriteCloser{file}, nil
 	case "gz":
 		w, err := pgzip.NewWriterLevel(file, 5)
 		if err != nil {
@@ -165,3 +167,15 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {

 	return nil
 }
+
+type passThroughWriteCloser struct {
+	target io.WriteCloser
+}
+
+func (p *passThroughWriteCloser) Write(b []byte) (int, error) {
+	return p.target.Write(b)
+}
+
+func (p *passThroughWriteCloser) Close() error {
+	return nil
+}
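The hunk above adds a writer that forwards writes but deliberately ignores Close. A minimal sketch of why that matters, assuming (as the surrounding diff suggests) that the caller closes the underlying *os.File itself; the temp-file name is illustrative only:

```go
package main

import (
	"io"
	"os"
)

// passThroughWriteCloser mirrors the type added above: writes go straight
// to the target, while Close is a no-op because the file's owner closes it.
type passThroughWriteCloser struct {
	target io.WriteCloser
}

func (p *passThroughWriteCloser) Write(b []byte) (int, error) {
	return p.target.Write(b)
}

func (p *passThroughWriteCloser) Close() error {
	return nil
}

func main() {
	// Hypothetical demo file; in the real code this is the tar output file.
	f, err := os.CreateTemp("", "demo-*.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close() // the file's owner closes it exactly once

	var w io.WriteCloser = &passThroughWriteCloser{f}
	if _, err := w.Write([]byte("raw tar bytes")); err != nil {
		panic(err)
	}
	_ = w.Close() // safe no-op: does not close the underlying file twice
}
```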
@@ -36,6 +36,9 @@ func (c *command) runAsCommand() error {
 	}

 	for _, config := range configurations {
+		if err := config.validate(); err != nil {
+			return errwrap.Wrap(err, "error validating config")
+		}
 		if err := runScript(config); err != nil {
 			return errwrap.Wrap(err, "error running script")
 		}
@@ -101,6 +104,12 @@ func (c *command) schedule(strategy configStrategy) error {
 	}

 	for _, cfg := range configurations {
+		if err := cfg.validate(); err != nil {
+			return errwrap.Wrap(
+				err,
+				fmt.Sprintf("error validating config for schedule %s", cfg.BackupCronExpression),
+			)
+		}
 		config := cfg
 		id, err := c.cr.AddFunc(config.BackupCronExpression, func() {
 			c.logger.Info(
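Both hunks follow the same fail-fast shape: every parsed configuration is validated before anything is run or scheduled, so a bad config aborts the whole invocation instead of failing mid-run. A condensed, self-contained sketch of the pattern (the config type and its rule are stand-ins, not the project's full Config):

```go
package main

import "fmt"

// config is a stand-in for the project's Config type with one example rule.
type config struct {
	primaryAccountKey string
	connectionString  string
}

func (c *config) validate() error {
	if c.primaryAccountKey != "" && c.connectionString != "" {
		return fmt.Errorf("primary account key and connection string are mutually exclusive")
	}
	return nil
}

func main() {
	configurations := []*config{
		{primaryAccountKey: "key"},
		{primaryAccountKey: "key", connectionString: "conn"},
	}
	for _, cfg := range configurations {
		// Validate before running or scheduling anything, as the diff does.
		if err := cfg.validate(); err != nil {
			fmt.Println("error validating config:", err)
			return
		}
	}
	fmt.Println("all configurations are valid")
}
```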
@@ -12,87 +12,96 @@ import (
 	"strconv"
 	"time"

+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
 	"github.com/offen/docker-volume-backup/internal/errwrap"
 )

 // Config holds all configuration values that are expected to be set
 // by users.
 type Config struct {
 	AwsS3BucketName string `split_words:"true"`
 	AwsS3Path string `split_words:"true"`
 	AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"`
 	AwsEndpointProto string `split_words:"true" default:"https"`
 	AwsEndpointInsecure bool `split_words:"true"`
 	AwsEndpointCACert CertDecoder `envconfig:"AWS_ENDPOINT_CA_CERT"`
 	AwsStorageClass string `split_words:"true"`
 	AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
 	AwsSecretAccessKey string `split_words:"true"`
 	AwsIamRoleEndpoint string `split_words:"true"`
 	AwsPartSize int64 `split_words:"true"`
 	BackupCompression CompressionType `split_words:"true" default:"gz"`
 	GzipParallelism WholeNumber `split_words:"true" default:"1"`
 	BackupSources string `split_words:"true" default:"/backup"`
 	BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.{{ .Extension }}"`
 	BackupFilenameExpand bool `split_words:"true"`
 	BackupLatestSymlink string `split_words:"true"`
 	BackupArchive string `split_words:"true" default:"/archive"`
 	BackupCronExpression string `split_words:"true" default:"@daily"`
 	BackupRetentionDays int32 `split_words:"true" default:"-1"`
 	BackupPruningLeeway time.Duration `split_words:"true" default:"1m"`
 	BackupPruningPrefix string `split_words:"true"`
 	BackupStopContainerLabel string `split_words:"true"`
 	BackupStopDuringBackupLabel string `split_words:"true" default:"true"`
 	BackupStopServiceTimeout time.Duration `split_words:"true" default:"5m"`
 	BackupFromSnapshot bool `split_words:"true"`
 	BackupExcludeRegexp RegexpDecoder `split_words:"true"`
 	BackupSkipBackendsFromPrune []string `split_words:"true"`
 	GpgPassphrase string `split_words:"true"`
+	GpgPublicKeyRing string `split_words:"true"`
 	NotificationURLs []string `envconfig:"NOTIFICATION_URLS"`
 	NotificationLevel string `split_words:"true" default:"error"`
 	EmailNotificationRecipient string `split_words:"true"`
 	EmailNotificationSender string `split_words:"true" default:"noreply@nohost"`
 	EmailSMTPHost string `envconfig:"EMAIL_SMTP_HOST"`
 	EmailSMTPPort int `envconfig:"EMAIL_SMTP_PORT" default:"587"`
 	EmailSMTPUsername string `envconfig:"EMAIL_SMTP_USERNAME"`
 	EmailSMTPPassword string `envconfig:"EMAIL_SMTP_PASSWORD"`
 	WebdavUrl string `split_words:"true"`
 	WebdavUrlInsecure bool `split_words:"true"`
 	WebdavPath string `split_words:"true" default:"/"`
 	WebdavUsername string `split_words:"true"`
 	WebdavPassword string `split_words:"true"`
 	SSHHostName string `split_words:"true"`
 	SSHPort string `split_words:"true" default:"22"`
 	SSHUser string `split_words:"true"`
 	SSHPassword string `split_words:"true"`
 	SSHIdentityFile string `split_words:"true" default:"/root/.ssh/id_rsa"`
 	SSHIdentityPassphrase string `split_words:"true"`
 	SSHRemotePath string `split_words:"true"`
 	ExecLabel string `split_words:"true"`
 	ExecForwardOutput bool `split_words:"true"`
 	LockTimeout time.Duration `split_words:"true" default:"60m"`
 	AzureStorageAccountName string `split_words:"true"`
 	AzureStoragePrimaryAccountKey string `split_words:"true"`
 	AzureStorageConnectionString string `split_words:"true"`
 	AzureStorageContainerName string `split_words:"true"`
 	AzureStoragePath string `split_words:"true"`
 	AzureStorageEndpoint string `split_words:"true" default:"https://{{ .AccountName }}.blob.core.windows.net/"`
-	AzureStorageAccessTier string `split_words:"true"`
+	AzureStorageAccessTier AzureStorageAccessTier `split_words:"true"`
 	DropboxEndpoint string `split_words:"true" default:"https://api.dropbox.com/"`
 	DropboxOAuth2Endpoint string `envconfig:"DROPBOX_OAUTH2_ENDPOINT" default:"https://api.dropbox.com/"`
 	DropboxRefreshToken string `split_words:"true"`
 	DropboxAppKey string `split_words:"true"`
 	DropboxAppSecret string `split_words:"true"`
 	DropboxRemotePath string `split_words:"true"`
 	DropboxConcurrencyLevel NaturalNumber `split_words:"true" default:"6"`

 	source string
 	additionalEnvVars map[string]string
 }

+func (c *Config) validate() error {
+	if c.AzureStoragePrimaryAccountKey != "" && c.AzureStorageConnectionString != "" {
+		return errwrap.Wrap(nil, "using azure primary account key and connection string are mutually exclusive")
+	}
+	return nil
+}
+
 type CompressionType string

 func (c *CompressionType) Decode(v string) error {
 	switch v {
-	case "gz", "zst":
+	case "none", "gz", "zst":
 		*c = CompressionType(v)
 		return nil
 	default:
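The Decode method is the hook envconfig uses to parse BACKUP_COMPRESSION, so the new "none" value is accepted or rejected at configuration time rather than mid-backup. A hedged sketch calling the decoder directly ("lz4" is an invented invalid input, and the plain fmt.Errorf stands in for the project's errwrap):

```go
package main

import "fmt"

type CompressionType string

// Decode implements the decoder contract envconfig invokes on custom types.
func (c *CompressionType) Decode(v string) error {
	switch v {
	case "none", "gz", "zst":
		*c = CompressionType(v)
		return nil
	default:
		return fmt.Errorf("%s is not a valid compression type", v)
	}
}

func main() {
	var c CompressionType
	fmt.Println(c.Decode("none")) // <nil> — newly accepted by this change
	fmt.Println(c.Decode("lz4"))  // lz4 is not a valid compression type
}
```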
@@ -179,6 +188,30 @@ func (n *WholeNumber) Int() int {
 	return int(*n)
 }

+type AzureStorageAccessTier string
+
+func (t *AzureStorageAccessTier) Decode(v string) error {
+	if v == "" {
+		*t = ""
+		return nil
+	}
+	for _, a := range blob.PossibleAccessTierValues() {
+		if string(a) == v {
+			*t = AzureStorageAccessTier(v)
+			return nil
+		}
+	}
+	return errwrap.Wrap(nil, fmt.Sprintf("%s is not a possible access tier value", v))
+}
+
+func (t *AzureStorageAccessTier) AccessTier() *blob.AccessTier {
+	if *t == "" {
+		return nil
+	}
+	a := blob.AccessTier(*t)
+	return &a
+}
+
 type envVarLookup struct {
 	ok bool
 	key string
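For orientation, this is how the new type behaves at its two call sites: Decode validates AZURE_STORAGE_ACCESS_TIER against the SDK's known tiers, and AccessTier() turns the empty value into a nil pointer. A self-contained sketch, assuming the azblob SDK module is available; it re-declares the type so it compiles on its own, and "Lukewarm" is an invented invalid input:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

type AzureStorageAccessTier string

func (t *AzureStorageAccessTier) Decode(v string) error {
	if v == "" {
		*t = "" // unset is allowed and later maps to a nil pointer
		return nil
	}
	for _, a := range blob.PossibleAccessTierValues() {
		if string(a) == v {
			*t = AzureStorageAccessTier(v)
			return nil
		}
	}
	return fmt.Errorf("%s is not a possible access tier value", v)
}

func (t *AzureStorageAccessTier) AccessTier() *blob.AccessTier {
	if *t == "" {
		return nil // lets Azure fall back to the container's default tier
	}
	a := blob.AccessTier(*t)
	return &a
}

func main() {
	var t AzureStorageAccessTier
	fmt.Println(t.Decode("Hot"), *t.AccessTier()) // <nil> Hot
	fmt.Println(t.Decode("Lukewarm"))             // ...not a possible access tier value
}
```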
@@ -4,20 +4,75 @@
 package main

 import (
+	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"os"
 	"path"

+	"github.com/ProtonMail/go-crypto/openpgp/armor"
 	openpgp "github.com/ProtonMail/go-crypto/openpgp/v2"
 	"github.com/offen/docker-volume-backup/internal/errwrap"
 )

-// encryptArchive encrypts the backup file using PGP and the configured passphrase.
-// In case no passphrase is given it returns early, leaving the backup file
+func (s *script) encryptAsymmetrically(outFile *os.File) (io.WriteCloser, func() error, error) {
+	entityList, err := openpgp.ReadArmoredKeyRing(bytes.NewReader([]byte(s.c.GpgPublicKeyRing)))
+	if err != nil {
+		return nil, nil, errwrap.Wrap(err, "error parsing armored keyring")
+	}
+
+	armoredWriter, err := armor.Encode(outFile, "PGP MESSAGE", nil)
+	if err != nil {
+		return nil, nil, errwrap.Wrap(err, "error preparing encryption")
+	}
+
+	_, name := path.Split(s.file)
+	dst, err := openpgp.Encrypt(armoredWriter, entityList, nil, nil, &openpgp.FileHints{
+		FileName: name,
+	}, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return dst, func() error {
+		if err := dst.Close(); err != nil {
+			return err
+		}
+		return armoredWriter.Close()
+	}, err
+}
+
+func (s *script) encryptSymmetrically(outFile *os.File) (io.WriteCloser, func() error, error) {
+	_, name := path.Split(s.file)
+	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
+		FileName: name,
+	}, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return dst, dst.Close, nil
+}
+
+// encryptArchive encrypts the backup file using PGP and the configured passphrase or publickey(s).
+// In case no passphrase or publickey is given it returns early, leaving the backup file
 // untouched.
 func (s *script) encryptArchive() error {
-	if s.c.GpgPassphrase == "" {
+	var encrypt func(outFile *os.File) (io.WriteCloser, func() error, error)
+	var cleanUpErr error
+
+	switch {
+	case s.c.GpgPassphrase != "" && s.c.GpgPublicKeyRing != "":
+		return errwrap.Wrap(nil, "error in selecting asymmetric and symmetric encryption methods: conflicting env vars are set")
+	case s.c.GpgPassphrase != "":
+		encrypt = s.encryptSymmetrically
+	case s.c.GpgPublicKeyRing != "":
+		encrypt = s.encryptAsymmetrically
+	default:
 		return nil
 	}

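The rewritten selection logic is worth isolating: symmetric and asymmetric encryption are mutually exclusive, and the chosen strategy is stored as a function value so the rest of encryptArchive stays agnostic about which one runs. A condensed sketch of just that switch (strings stand in for the real function values):

```go
package main

import (
	"errors"
	"fmt"
)

// choose mirrors the switch above: conflicting settings are an error,
// exactly one setting selects a strategy, and nothing set means "skip".
func choose(passphrase, publicKeyRing string) (string, error) {
	switch {
	case passphrase != "" && publicKeyRing != "":
		return "", errors.New("conflicting env vars are set")
	case passphrase != "":
		return "symmetric", nil
	case publicKeyRing != "":
		return "asymmetric", nil
	default:
		return "", nil // nothing configured: leave the backup unencrypted
	}
}

func main() {
	fmt.Println(choose("secret", ""))     // symmetric <nil>
	fmt.Println(choose("", "-----BEGIN")) // asymmetric <nil>
	fmt.Println(choose("a", "b"))         // conflicting env vars are set
}
```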
@@ -36,22 +91,31 @@ func (s *script) encryptArchive() error {
 	if err != nil {
 		return errwrap.Wrap(err, "error opening out file")
 	}
-	defer outFile.Close()
+	defer func() {
+		if err := outFile.Close(); err != nil {
+			cleanUpErr = errors.Join(cleanUpErr, errwrap.Wrap(err, "error closing out file"))
+		}
+	}()

-	_, name := path.Split(s.file)
-	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
-		FileName: name,
-	}, nil)
+	dst, dstCloseCallback, err := encrypt(outFile)
 	if err != nil {
 		return errwrap.Wrap(err, "error encrypting backup file")
 	}
-	defer dst.Close()
+	defer func() {
+		if err := dstCloseCallback(); err != nil {
+			cleanUpErr = errors.Join(cleanUpErr, errwrap.Wrap(err, "error closing encrypted backup file"))
+		}
+	}()

 	src, err := os.Open(s.file)
 	if err != nil {
 		return errwrap.Wrap(err, fmt.Sprintf("error opening backup file `%s`", s.file))
 	}
-	defer src.Close()
+	defer func() {
+		if err := src.Close(); err != nil {
+			cleanUpErr = errors.Join(cleanUpErr, errwrap.Wrap(err, "error closing backup file"))
+		}
+	}()

 	if _, err := io.Copy(dst, src); err != nil {
 		return errwrap.Wrap(err, "error writing ciphertext to file")
@@ -59,7 +123,7 @@ func (s *script) encryptArchive() error {

 	s.file = gpgFile
 	s.logger.Info(
-		fmt.Sprintf("Encrypted backup using given passphrase, saving as `%s`.", s.file),
+		fmt.Sprintf("Encrypted backup using gpg, saving as `%s`.", s.file),
 	)
-	return nil
+	return cleanUpErr
 }
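The three rewritten defers all funnel close errors into cleanUpErr via errors.Join instead of discarding them. A minimal sketch of the pattern, with one assumption flagged: it uses a named return value so the deferred Join is visible to the caller, which is one way to make deferred assignments take effect on the returned error:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// copyWithCleanup names its return value so errors from the deferred
// Close are joined into the error the caller actually receives.
func copyWithCleanup(dstPath string) (cleanUpErr error) {
	outFile, err := os.Create(dstPath)
	if err != nil {
		return err
	}
	defer func() {
		if err := outFile.Close(); err != nil {
			cleanUpErr = errors.Join(cleanUpErr, fmt.Errorf("error closing out file: %w", err))
		}
	}()

	if _, err := outFile.WriteString("payload"); err != nil {
		return errors.Join(cleanUpErr, err)
	}
	return cleanUpErr
}

func main() {
	fmt.Println(copyWithCleanup(os.DevNull)) // <nil> on a healthy filesystem
}
```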
@@ -86,7 +86,12 @@ func (s *script) init() error {

 	var bf bytes.Buffer
 	if tErr := tmplFileName.Execute(&bf, map[string]string{
-		"Extension": fmt.Sprintf("tar.%s", s.c.BackupCompression),
+		"Extension": func() string {
+			if s.c.BackupCompression == "none" {
+				return "tar"
+			}
+			return fmt.Sprintf("tar.%s", s.c.BackupCompression)
+		}(),
 	}); tErr != nil {
 		return errwrap.Wrap(tErr, "error executing backup file extension template")
 	}
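The extension is now computed by an immediately invoked function literal so "none" maps to a bare "tar" while every real algorithm keeps the "tar.&lt;algo&gt;" form. The same mapping as a tiny standalone helper, for clarity:

```go
package main

import "fmt"

// extension reproduces the mapping from the template data above.
func extension(compression string) string {
	if compression == "none" {
		return "tar" // uncompressed archives drop the algorithm suffix
	}
	return fmt.Sprintf("tar.%s", compression)
}

func main() {
	fmt.Println(extension("gz"))   // tar.gz
	fmt.Println(extension("none")) // tar
}
```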
@@ -194,7 +199,7 @@ func (s *script) init() error {
 			Endpoint: s.c.AzureStorageEndpoint,
 			RemotePath: s.c.AzureStoragePath,
 			ConnectionString: s.c.AzureStorageConnectionString,
-			AccessTier: s.c.AzureStorageAccessTier,
+			AccessTier: s.c.AzureStorageAccessTier.AccessTier(),
 		}
 		azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
 		if err != nil {
@@ -8,7 +8,7 @@ nav_order: 7
 # Encrypt backups using GPG

 The image supports encrypting backups using GPG out of the box.
-In case a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.gpg` file instead.
+In case a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY_RING` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.gpg` file instead.

 Assuming you have `gpg` installed, you can decrypt such a backup using (your OS will prompt for the passphrase before decryption can happen):

@@ -289,7 +289,7 @@ volumes:
   data:
 ```

-## Encrypting your backups using GPG
+## Encrypting your backups symmetrically using GPG

 ```yml
 version: '3'
@@ -311,6 +311,33 @@ volumes:
   data:
 ```

+## Encrypting your backups asymmetrically using GPG
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:v2
+    environment:
+      AWS_S3_BUCKET_NAME: backup-bucket
+      AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
+      AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+      GPG_PUBLIC_KEY_RING: |
+        -----BEGIN PGP PUBLIC KEY BLOCK-----
+
+        D/cIHu6GH/0ghlcUVSbgMg5RRI5QKNNKh04uLAPxr75mKwUg0xPUaWgyyrAChVBi
+        ...
+        -----END PGP PUBLIC KEY BLOCK-----
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+  data:
+```
+
 ## Using mysqldump to prepare the backup

 ```yml
@@ -43,8 +43,8 @@ You can populate below template according to your requirements and use it as you
 # BACKUP_CRON_EXPRESSION="0 2 * * *"

 # The compression algorithm used in conjunction with tar.
-# Valid options are: "gz" (Gzip) and "zst" (Zstd).
-# Note that the selection affects the file extension.
+# Valid options are: "gz" (Gzip), "zst" (Zstd) or "none" (tar only).
+# Default is "gz". Note that the selection affects the file extension.

 # BACKUP_COMPRESSION="gz"

@@ -60,7 +60,7 @@ You can populate below template according to your requirements and use it as you
 # will result in the same filename for every backup run, which means previous
 # versions will be overwritten on subsequent runs.
 # Extension can be defined literally or via "{{ .Extension }}" template,
-# in which case it will become either "tar.gz" or "tar.zst" (depending
+# in which case it will become either "tar.gz", "tar.zst" or ".tar" (depending
 # on your BACKUP_COMPRESSION setting).
 # The default results in filenames like: `backup-2021-08-29T04-00-00.tar.gz`.

@@ -337,10 +337,19 @@ You can populate below template according to your requirements and use it as you

 ########### BACKUP ENCRYPTION

-# Backups can be encrypted using gpg in case a passphrase is given.
+# Backups can be encrypted symmetrically using gpg in case a passphrase is given.

 # GPG_PASSPHRASE="<xxx>"

+# Backups can be encrypted asymmetrically using gpg in case publickeys are given.
+
+# GPG_PUBLIC_KEY_RING= |
+#-----BEGIN PGP PUBLIC KEY BLOCK-----
+#
+#D/cIHu6GH/0ghlcUVSbgMg5RRI5QKNNKh04uLAPxr75mKwUg0xPUaWgyyrAChVBi
+#...
+#-----END PGP PUBLIC KEY BLOCK-----
+
 ########### STOPPING CONTAINERS AND SERVICES DURING BACKUP

 # Containers or services can be stopped by applying a
@@ -39,15 +39,11 @@ type Config struct {
 	ConnectionString string
 	Endpoint string
 	RemotePath string
-	AccessTier string
+	AccessTier *blob.AccessTier
 }

 // NewStorageBackend creates and initializes a new Azure Blob Storage backend.
 func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
-	if opts.PrimaryAccountKey != "" && opts.ConnectionString != "" {
-		return nil, errwrap.Wrap(nil, "using primary account key and connection string are mutually exclusive")
-	}
-
 	endpointTemplate, err := template.New("endpoint").Parse(opts.Endpoint)
 	if err != nil {
 		return nil, errwrap.Wrap(err, "error parsing endpoint template")
@@ -85,26 +81,12 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 		}
 	}

-	var uploadStreamOptions *blockblob.UploadStreamOptions
-	if opts.AccessTier != "" {
-		var found bool
-		for _, t := range blob.PossibleAccessTierValues() {
-			if string(t) == opts.AccessTier {
-				found = true
-				uploadStreamOptions = &blockblob.UploadStreamOptions{
-					AccessTier: &t,
-				}
-			}
-		}
-		if !found {
-			return nil, errwrap.Wrap(nil, fmt.Sprintf("%s is not a possible access tier value", opts.AccessTier))
-		}
-	}
-
 	storage := azureBlobStorage{
 		client: client,
-		uploadStreamOptions: uploadStreamOptions,
+		uploadStreamOptions: &blockblob.UploadStreamOptions{
+			AccessTier: opts.AccessTier,
+		},
 		containerName: opts.ContainerName,
 		StorageBackend: &storage.StorageBackend{
 			DestinationPath: opts.RemotePath,
 			Log: logFunc,
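The per-backend validation loop disappears here because the tier is already validated when the env var is decoded; the backend now just forwards the pointer, where nil means "no explicit tier". A small sketch of the resulting construction, assuming the azblob SDK module is on hand (blob.AccessTierHot is one of the SDK's predefined tier constants):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// uploadOptions mirrors the simplified construction above: the tier pointer
// arrives pre-validated from config, and nil simply means "container default".
func uploadOptions(tier *blob.AccessTier) *blockblob.UploadStreamOptions {
	return &blockblob.UploadStreamOptions{AccessTier: tier}
}

func main() {
	hot := blob.AccessTierHot
	fmt.Println(*uploadOptions(&hot).AccessTier) // Hot
	fmt.Println(uploadOptions(nil).AccessTier)   // <nil>
}
```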
@@ -4,6 +4,7 @@ RUN apk add \
 	coreutils \
 	curl \
 	gpg \
+	gpg-agent \
 	jq \
 	moreutils \
 	tar \
test/gpg-asym/docker-compose.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    restart: always
+    environment:
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
+      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      GPG_PUBLIC_KEY_RING_FILE: /keys/public_key.asc
+    volumes:
+      - ${KEY_DIR:-.}/public_key.asc:/keys/public_key.asc
+      - ${LOCAL_DIR:-./local}:/archive
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/gpg-asym/run.sh (new executable file, 49 lines)
@@ -0,0 +1,49 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+export LOCAL_DIR=$(mktemp -d)
+
+export KEY_DIR=$(mktemp -d)
+
+export PASSPHRASE="test"
+
+gpg --batch --gen-key <<EOF
+Key-Type: RSA
+Key-Length: 4096
+Name-Real: offen
+Name-Email: docker-volume-backup@local
+Expire-Date: 0
+Passphrase: $PASSPHRASE
+%commit
+EOF
+
+gpg --export --armor --batch --yes --pinentry-mode loopback --passphrase $PASSPHRASE --output $KEY_DIR/public_key.asc
+
+docker compose up -d --quiet-pull
+sleep 5
+
+docker compose exec backup backup
+
+expect_running_containers "2"
+
+TMP_DIR=$(mktemp -d)
+
+gpg -d --pinentry-mode loopback --yes --passphrase $PASSPHRASE "$LOCAL_DIR/test.tar.gz.gpg" > "$LOCAL_DIR/decrypted.tar.gz"
+
+tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C $TMP_DIR
+
+if [ ! -f $TMP_DIR/backup/app_data/offen.db ]; then
+  fail "Could not find expected file in untared archive."
+fi
+rm "$LOCAL_DIR/decrypted.tar.gz"
+
+pass "Found relevant files in decrypted and untared local backup."
+
+if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.gpg" ]; then
+  fail "Could not find local symlink to latest encrypted backup."
+fi
test/tar/docker-compose.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    restart: always
+    environment:
+      BACKUP_FILENAME: test.{{ .Extension }}
+      BACKUP_COMPRESSION: none
+    volumes:
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+      - ${LOCAL_DIR:-./local}:/archive
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/tar/run.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+export LOCAL_DIR=$(mktemp -d)
+
+docker compose up -d --quiet-pull
+sleep 5
+
+docker compose exec backup backup
+
+sleep 5
+
+expect_running_containers "2"
+
+tmp_dir=$(mktemp -d)
+tar -xvf "$LOCAL_DIR/test.tar" -C $tmp_dir
+if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
+  fail "Could not find expected file in untared archive."
+fi
+pass "Expected file was found."