Compare commits

...

5 Commits

Author          SHA1        Message                                                             Date
Frederik Ring   8caac8724c  Add documentation for header format option                          2022-04-19 21:21:23 +02:00
Frederik Ring   9eda23e512  Make header format for created tar archive configurable             2022-04-19 21:11:29 +02:00
Frederik Ring   1b1fc4856c  List objects recursively when selecting candidates from S3 (#92)    2022-04-15 11:05:52 +02:00
Frederik Ring   e81c34b8fc  Consider S3 Path when selecting candidates for pruning (#91)        2022-04-13 17:09:37 +02:00
Simon Dünhöft   9c23767fce  Fixed wrong env name for S3 bucket in README (#89)                  2022-04-12 19:38:15 +02:00
                            The README was using `AWS_BUCKET_NAME` instead of `AWS_S3_BUCKET_NAME` in the recipes.
                            This resulted in no data being uploaded to S3.
4 changed files with 70 additions and 16 deletions

View File

@@ -351,6 +351,19 @@ You can populate below template according to your requirements and use it as you
# LOCK_TIMEOUT="60m"
########### HEADER FORMAT USED BY THE TAR ARCHIVE
# By default, tar archive creation will pick a header format that is appropriate
# for the archive's contents. In case you have special requirements or need to
# work with tools that do not support all standard header formats, you can use
# this option to enforce a certain header format. Valid options are "USTAR",
# "PAX" and "GNU". Be aware that setting this value might create situations where
# it's not possible to encode the information about a certain file, making the
# backup fail.
# In case no value is set, an appropriate format will be selected for each file.
# TAR_ARCHIVE_HEADER_FORMAT="USTAR"
########### EMAIL NOTIFICATIONS
# ************************************************************************
@@ -747,7 +760,7 @@ services:
backup:
image: offen/docker-volume-backup:v2
environment:
AWS_BUCKET_NAME: backup-bucket
AWS_S3_BUCKET_NAME: backup-bucket
AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
volumes:
@@ -769,7 +782,7 @@ services:
image: offen/docker-volume-backup:v2
environment:
AWS_ENDPOINT: s3.filebase.com
AWS_BUCKET_NAME: filebase-bucket
AWS_S3_BUCKET_NAME: filebase-bucket
AWS_ACCESS_KEY_ID: FILEBASE-ACCESS-KEY
AWS_SECRET_ACCESS_KEY: FILEBASE-SECRET-KEY
volumes:
@@ -791,7 +804,7 @@ services:
image: offen/docker-volume-backup:v2
environment:
AWS_ENDPOINT: minio.example.com
AWS_BUCKET_NAME: backup-bucket
AWS_S3_BUCKET_NAME: backup-bucket
AWS_ACCESS_KEY_ID: MINIOACCESSKEY
AWS_SECRET_ACCESS_KEY: MINIOSECRETKEY
volumes:
@@ -855,7 +868,7 @@ services:
backup:
image: offen/docker-volume-backup:v2
environment:
AWS_BUCKET_NAME: backup-bucket
AWS_S3_BUCKET_NAME: backup-bucket
AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
volumes:
@@ -879,7 +892,7 @@ services:
environment:
# take a backup on every hour
BACKUP_CRON_EXPRESSION: "0 * * * *"
AWS_BUCKET_NAME: backup-bucket
AWS_S3_BUCKET_NAME: backup-bucket
AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
volumes:
@@ -900,7 +913,7 @@ services:
backup:
image: offen/docker-volume-backup:v2
environment:
AWS_BUCKET_NAME: backup-bucket
AWS_S3_BUCKET_NAME: backup-bucket
AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
BACKUP_FILENAME: backup-%Y-%m-%dT%H-%M-%S.tar.gz
@@ -924,7 +937,7 @@ services:
backup:
image: offen/docker-volume-backup:v2
environment:
AWS_BUCKET_NAME: backup-bucket
AWS_S3_BUCKET_NAME: backup-bucket
AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
GPG_PASSPHRASE: somesecretstring
@@ -973,7 +986,7 @@ services:
image: offen/docker-volume-backup:v2
environment: &backup_environment
BACKUP_CRON_EXPRESSION: "0 2 * * *"
AWS_BUCKET_NAME: backup-bucket
AWS_S3_BUCKET_NAME: backup-bucket
AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
# Label the container using the `data_1` volume as `docker-volume-backup.stop-during-backup=service1`
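The new README paragraph above warns that enforcing a header format can make a backup fail when some file cannot be encoded in that format. A minimal, standalone Go sketch of that failure mode with `archive/tar` (not part of this changeset; the overlong path is a made-up example):

```go
// Demonstrates why forcing TAR_ARCHIVE_HEADER_FORMAT can break archiving:
// USTAR cannot represent very long paths, so writing such a header fails.
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"strings"
)

func main() {
	tw := tar.NewWriter(io.Discard)
	defer tw.Close()

	header := &tar.Header{
		// A path this long cannot be split into USTAR's name/prefix fields.
		Name:   strings.Repeat("some-deeply-nested-directory/", 20) + "file.txt",
		Mode:   0o600,
		Format: tar.FormatUSTAR, // what enforcing "USTAR" effectively does per file
	}

	if err := tw.WriteHeader(header); err != nil {
		// Leaving the format unset (or choosing PAX/GNU) would avoid this error.
		fmt.Println("writing header failed:", err)
	}
}
```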

View File

@@ -18,7 +18,7 @@ import (
"strings"
)
func createArchive(inputFilePath, outputFilePath string) error {
func createArchive(inputFilePath, outputFilePath string, options createArchiveOptions) error {
inputFilePath = stripTrailingSlashes(inputFilePath)
inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
if err != nil {
@@ -28,7 +28,7 @@ func createArchive(inputFilePath, outputFilePath string) error {
return fmt.Errorf("createArchive: error creating output file path: %w", err)
}
if err := compress(inputFilePath, outputFilePath, filepath.Dir(inputFilePath)); err != nil {
if err := compress(inputFilePath, outputFilePath, filepath.Dir(inputFilePath), options); err != nil {
return fmt.Errorf("createArchive: error creating archive: %w", err)
}
@@ -52,7 +52,7 @@ func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error)
return inputFilePath, outputFilePath, err
}
func compress(inPath, outFilePath, subPath string) error {
func compress(inPath, outFilePath, subPath string, options createArchiveOptions) error {
file, err := os.Create(outFilePath)
if err != nil {
return fmt.Errorf("compress: error creating out file: %w", err)
@@ -71,7 +71,7 @@ func compress(inPath, outFilePath, subPath string) error {
}
for _, p := range paths {
if err := writeTarGz(p, tarWriter, prefix); err != nil {
if err := writeTarGz(p, tarWriter, prefix, options.format); err != nil {
return fmt.Errorf("compress error writing %s to archive: %w", p, err)
}
}
@@ -94,7 +94,7 @@ func compress(inPath, outFilePath, subPath string) error {
return nil
}
func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
func writeTarGz(path string, tarWriter *tar.Writer, prefix string, format tar.Format) error {
fileInfo, err := os.Lstat(path)
if err != nil {
return fmt.Errorf("writeTarGz: error getting file infor for %s: %w", path, err)
@@ -113,6 +113,10 @@ func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
}
header, err := tar.FileInfoHeader(fileInfo, link)
if format >= 0 {
header.Format = format
}
if err != nil {
return fmt.Errorf("writeTarGz: error getting file info header: %w", err)
}
@@ -140,3 +144,7 @@ func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
return nil
}
type createArchiveOptions struct {
format tar.Format
}
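
The `format >= 0` guard added to `writeTarGz` above relies on the configuration layer using a negative sentinel for "nothing configured" (see the `Decode` method in the next file), while every format the option can select compares greater than zero. A tiny illustrative sketch, not taken from the repository:

```go
package main

import (
	"archive/tar"
	"fmt"
)

func main() {
	// All formats the new option can select are positive values...
	for _, f := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
		fmt.Println(f, f > 0) // e.g. "USTAR true"
	}
	// ...so only the -1 sentinel (empty TAR_ARCHIVE_HEADER_FORMAT) skips the
	// header.Format assignment in writeTarGz.
	fmt.Println(tar.Format(-1) > 0) // false
}
```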

View File

@@ -3,7 +3,11 @@
package main
import "time"
import (
"archive/tar"
"fmt"
"time"
)
// Config holds all configuration values that are expected to be set
// by users.
@@ -42,4 +46,30 @@ type Config struct {
ExecLabel string `split_words:"true"`
ExecForwardOutput bool `split_words:"true"`
LockTimeout time.Duration `split_words:"true" default:"60m"`
TarArchiveHeaderFormat TarFormat `split_words:"true"`
}
type TarFormat tar.Format
func (t *TarFormat) Decode(value string) error {
switch value {
case "PAX":
*t = TarFormat(tar.FormatPAX)
return nil
case "USTAR":
*t = TarFormat(tar.FormatUSTAR)
return nil
case "GNU":
*t = TarFormat(tar.FormatGNU)
return nil
case "":
*t = TarFormat(-1)
return nil
default:
return fmt.Errorf("tarFormat: unknown format %s", value)
}
}
func (t *TarFormat) Format() tar.Format {
return tar.Format(*t)
}
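
A short usage sketch for the decoder above (assumptions: the `TarFormat` type from this hunk is in scope and `fmt`/`archive/tar` are imported as shown; the function itself is illustrative and not part of the changeset):

```go
func decodeExample() error {
	var f TarFormat
	if err := f.Decode("PAX"); err != nil {
		return err
	}
	fmt.Println(f.Format() == tar.FormatPAX) // true

	var unset TarFormat
	if err := unset.Decode(""); err != nil {
		return err
	}
	fmt.Println(unset.Format() < 0) // true: no header format will be enforced

	// Unknown values are rejected, so a typo in TAR_ARCHIVE_HEADER_FORMAT
	// surfaces as a configuration error instead of being silently ignored.
	var invalid TarFormat
	return invalid.Decode("V7")
}
```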

View File

@@ -388,7 +388,9 @@ func (s *script) takeBackup() error {
s.logger.Infof("Removed tar file `%s`.", tarFile)
return nil
})
if err := createArchive(backupSources, tarFile); err != nil {
if err := createArchive(backupSources, tarFile, createArchiveOptions{
format: s.c.TarArchiveHeaderFormat.Format(),
}); err != nil {
return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
}
@@ -536,7 +538,8 @@ func (s *script) pruneBackups() error {
if s.minioClient != nil {
candidates := s.minioClient.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{
WithMetadata: true,
Prefix: s.c.BackupPruningPrefix,
Prefix: filepath.Join(s.c.AwsS3Path, s.c.BackupPruningPrefix),
Recursive: true,
})
var matches []minio.ObjectInfo
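
Together, the two listing changes above let pruning see everything below the configured S3 path: the prefix now includes `AWS_S3_PATH`, and `Recursive: true` stops minio-go from treating `/` as a delimiter, which would otherwise return deeper sub-paths only as common prefixes rather than as objects that can be inspected for pruning. A standalone sketch of the call, reusing the sample MinIO endpoint, bucket and credentials from the README recipes above; the path and pruning prefix values are hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"path/filepath"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("minio.example.com", &minio.Options{
		Creds: credentials.NewStaticV4("MINIOACCESSKEY", "MINIOSECRETKEY", ""),
	})
	if err != nil {
		panic(err)
	}

	// Mirrors the changed pruneBackups call: the prefix is the S3 path joined
	// with the pruning prefix, and listing walks the tree recursively.
	prefix := filepath.Join("my/backups", "backup-")
	for object := range client.ListObjects(context.Background(), "backup-bucket", minio.ListObjectsOptions{
		WithMetadata: true,
		Prefix:       prefix,
		Recursive:    true,
	}) {
		if object.Err != nil {
			panic(object.Err)
		}
		fmt.Println(object.Key, object.LastModified)
	}
}
```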