@@ -11,11 +11,9 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
-	"os/exec"
 	"path"
 	"path/filepath"
 	"strconv"
-	"strings"
 	"time"
 
 	"github.com/docker/docker/api/types"
@@ -23,6 +21,7 @@ import (
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/client"
 	"github.com/joho/godotenv"
+	"github.com/leekchan/timeutil"
 	minio "github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
 	"github.com/sirupsen/logrus"
@@ -41,18 +40,26 @@ func main() {
 	s.must(s.copyBackup())
 	s.must(s.cleanBackup())
 	s.must(s.pruneOldBackups())
+	s.logger.Info("Finished running backup tasks.")
 }
 
 type script struct {
-	ctx        context.Context
-	cli        *client.Client
-	mc         *minio.Client
-	logger     *logrus.Logger
-	file       string
-	bucket     string
-	archive    string
-	sources    string
-	passphrase string
+	ctx    context.Context
+	cli    *client.Client
+	mc     *minio.Client
+	logger *logrus.Logger
+
+	start time.Time
+
+	file           string
+	bucket         string
+	archive        string
+	sources        string
+	passphrase     []byte
+	retentionDays  *int
+	leeway         *time.Duration
+	containerLabel string
+	pruningPrefix  string
 }
 
 // lock opens a lockfile at the given location, keeping it locked until the
@@ -117,7 +124,27 @@ func (s *script) init() error {
 	s.file = path.Join("/tmp", file)
 	s.archive = os.Getenv("BACKUP_ARCHIVE")
 	s.sources = os.Getenv("BACKUP_SOURCES")
-	s.passphrase = os.Getenv("GPG_PASSPHRASE")
+	if v := os.Getenv("GPG_PASSPHRASE"); v != "" {
+		s.passphrase = []byte(v)
+	}
+	if v := os.Getenv("BACKUP_RETENTION_DAYS"); v != "" {
+		i, err := strconv.Atoi(v)
+		if err != nil {
+			return fmt.Errorf("init: error parsing BACKUP_RETENTION_DAYS as int: %w", err)
+		}
+		s.retentionDays = &i
+	}
+	if v := os.Getenv("BACKUP_PRUNING_LEEWAY"); v != "" {
+		d, err := time.ParseDuration(v)
+		if err != nil {
+			return fmt.Errorf("init: error parsing BACKUP_PRUNING_LEEWAY as duration: %w", err)
+		}
+		s.leeway = &d
+	}
+	s.containerLabel = os.Getenv("BACKUP_STOP_CONTAINER_LABEL")
+	s.pruningPrefix = os.Getenv("BACKUP_PRUNING_PREFIX")
+	s.start = time.Now()
+
 	return nil
 }
 
@@ -126,8 +153,9 @@ func (s *script) init() error {
 // sure containers are being restarted if required.
 func (s *script) stopContainersAndRun(thunk func() error) error {
 	if s.cli == nil {
-		return nil
+		return thunk()
 	}
+
 	allContainers, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
 		Quiet: true,
 	})
@@ -137,7 +165,7 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 
 	containerLabel := fmt.Sprintf(
 		"docker-volume-backup.stop-during-backup=%s",
-		os.Getenv("BACKUP_STOP_CONTAINER_LABEL"),
+		s.containerLabel,
 	)
 	containersToStop, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
 		Quiet: true,
@@ -151,7 +179,7 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 		return fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
 	}
 	s.logger.Infof(
-		"Stopping %d containers labeled `%s` out of %d running containers.",
+		"Stopping %d containers labeled `%s` out of %d running container(s).",
 		len(containersToStop),
 		containerLabel,
 		len(allContainers),
@@ -214,7 +242,7 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 				err,
 			)
 		}
-		s.logger.Infof("Successfully restarted %d containers.", len(stoppedContainers))
+		s.logger.Infof("Restarted %d container(s) and the matching service(s).", len(stoppedContainers))
 		return nil
 	}()
 
@@ -236,15 +264,11 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 // takeBackup creates a tar archive of the configured backup location and
 // saves it to disk.
 func (s *script) takeBackup() error {
-	outBytes, err := exec.Command("date", fmt.Sprintf("+%s", s.file)).Output()
-	if err != nil {
-		return fmt.Errorf("takeBackup: error formatting filename template: %w", err)
-	}
-	s.file = strings.TrimSpace(string(outBytes))
+	s.file = timeutil.Strftime(&s.start, s.file)
 	if err := targz.Compress(s.sources, s.file); err != nil {
 		return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
 	}
-	s.logger.Infof("Successfully created backup of `%s` at `%s`.", s.sources, s.file)
+	s.logger.Infof("Created backup of `%s` at `%s`.", s.sources, s.file)
 	return nil
 }
 
@@ -252,7 +276,7 @@ func (s *script) takeBackup() error {
 // In case no passphrase is given it returns early, leaving the backup file
 // untouched.
 func (s *script) encryptBackup() error {
-	if s.passphrase == "" {
+	if s.passphrase == nil {
 		return nil
 	}
 
@@ -286,8 +310,9 @@ func (s *script) encryptBackup() error {
 	if err := os.Remove(s.file); err != nil {
 		return fmt.Errorf("encryptBackup: error removing unencrpyted backup: %w", err)
 	}
+
 	s.file = gpgFile
-	s.logger.Infof("Successfully encrypted backup using given passphrase, saving as `%s`.", s.file)
+	s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
 	return nil
 }
 
@@ -302,16 +327,14 @@ func (s *script) copyBackup() error {
 		if err != nil {
 			return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
 		}
-		s.logger.Infof("Successfully uploaded a copy of backup `%s` to bucket `%s`", s.file, s.bucket)
+		s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`", s.file, s.bucket)
 	}
 
-	if s.archive != "" {
-		if _, err := os.Stat(s.archive); !os.IsNotExist(err) {
-			if err := copy(s.file, path.Join(s.archive, name)); err != nil {
-				return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
-			}
+	if _, err := os.Stat(s.archive); !os.IsNotExist(err) {
+		if err := copy(s.file, path.Join(s.archive, name)); err != nil {
+			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
 		}
-		s.logger.Infof("Successfully stored copy of backup `%s` in local archive `%s`", s.file, s.archive)
+		s.logger.Infof("Stored copy of backup `%s` in local archive `%s`", s.file, s.archive)
 	}
 	return nil
 }
@@ -321,7 +344,7 @@ func (s *script) cleanBackup() error {
 	if err := os.Remove(s.file); err != nil {
 		return fmt.Errorf("cleanBackup: error removing file: %w", err)
 	}
-	s.logger.Info("Successfully cleaned up local artifacts.")
+	s.logger.Info("Cleaned up local artifacts.")
 	return nil
 }
 
@@ -329,29 +352,22 @@ func (s *script) cleanBackup() error {
 // the given configuration. In case the given configuration would delete all
 // backups, it does nothing instead.
 func (s *script) pruneOldBackups() error {
-	retention := os.Getenv("BACKUP_RETENTION_DAYS")
-	if retention == "" {
+	if s.retentionDays == nil {
 		return nil
 	}
-	retentionDays, err := strconv.Atoi(retention)
-	if err != nil {
-		return fmt.Errorf("pruneOldBackups: error parsing BACKUP_RETENTION_DAYS as int: %w", err)
-	}
-	leeway := os.Getenv("BACKUP_PRUNING_LEEWAY")
-	sleepFor, err := time.ParseDuration(leeway)
-	if err != nil {
-		return fmt.Errorf("pruneBackups: error parsing given leeway value: %w", err)
-	}
-	s.logger.Infof("Sleeping for %s before pruning backups.", leeway)
-	time.Sleep(sleepFor)
 
-	s.logger.Infof("Trying to prune backups older than %d days now.", retentionDays)
-	deadline := time.Now().AddDate(0, 0, -retentionDays)
+	if s.leeway != nil {
+		s.logger.Infof("Sleeping for %s before pruning backups.", s.leeway)
+		time.Sleep(*s.leeway)
+	}
+
+	s.logger.Infof("Trying to prune backups older than %d day(s) now.", *s.retentionDays)
+	deadline := s.start.AddDate(0, 0, -*s.retentionDays)
 
 	if s.bucket != "" {
 		candidates := s.mc.ListObjects(s.ctx, s.bucket, minio.ListObjectsOptions{
 			WithMetadata: true,
-			Prefix:       os.Getenv("BACKUP_PRUNING_PREFIX"),
+			Prefix:       s.pruningPrefix,
 		})
 
 		var matches []minio.ObjectInfo
@@ -384,13 +400,13 @@ func (s *script) pruneOldBackups() error {
 
 		if len(errors) != 0 {
 			return fmt.Errorf(
-				"pruneOldBackups: %d errors removing files from remote storage: %w",
+				"pruneOldBackups: %d error(s) removing files from remote storage: %w",
 				len(errors),
 				errors[0],
 			)
 		}
 		s.logger.Infof(
-			"Successfully pruned %d out of %d remote backups as their age exceeded the configured retention period.",
+			"Pruned %d out of %d remote backup(s) as their age exceeded the configured retention period.",
 			len(matches),
 			lenCandidates,
 		)
@@ -400,13 +416,13 @@ func (s *script) pruneOldBackups() error {
 				len(matches),
 			)
 		} else {
-			s.logger.Infof("None of %d remote backups were pruned.", lenCandidates)
+			s.logger.Infof("None of %d remote backup(s) were pruned.", lenCandidates)
 		}
 	}
 
-	if s.archive != "" {
+	if _, err := os.Stat(s.archive); !os.IsNotExist(err) {
 		candidates, err := filepath.Glob(
-			path.Join(s.archive, fmt.Sprintf("%s*", os.Getenv("BACKUP_PRUNING_PREFIX"))),
+			path.Join(s.archive, fmt.Sprintf("%s*", s.pruningPrefix)),
 		)
 		if err != nil {
 			return fmt.Errorf(
@@ -439,13 +455,13 @@ func (s *script) pruneOldBackups() error {
 		}
 		if len(errors) != 0 {
 			return fmt.Errorf(
-				"pruneOldBackups: %d errors deleting local files, starting with: %w",
+				"pruneOldBackups: %d error(s) deleting local files, starting with: %w",
 				len(errors),
 				errors[0],
 			)
 		}
 		s.logger.Infof(
-			"Successfully pruned %d out of %d local backups as their age exceeded the configured retention period.",
+			"Pruned %d out of %d local backup(s) as their age exceeded the configured retention period.",
 			len(matches),
 			len(candidates),
 		)
@@ -455,7 +471,7 @@ func (s *script) pruneOldBackups() error {
 				len(matches),
 			)
 		} else {
-			s.logger.Infof("None of %d local backups were pruned.", len(candidates))
+			s.logger.Infof("None of %d local backup(s) were pruned.", len(candidates))
 		}
 	}
 	return nil