Mirror of https://github.com/offen/docker-volume-backup.git
Synced 2025-12-05 17:18:02 +01:00

Compare commits: v2.33.0...serve-metr (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | f4138fd733 | |
.github/workflows/golangci-lint.yml (vendored, 54 lines changed)

@@ -1,54 +0,0 @@
name: Run Linters
on:
  push:
    branches:
      - main
  pull_request:

permissions:
  contents: read
  # Optional: allow read access to pull request. Use with `only-new-issues` option.
  pull-requests: read

jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v4
        with:
          go-version: '1.21'
          cache: false
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          # Require: The version of golangci-lint to use.
          # When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version.
          # When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit.
          version: v1.54

          # Optional: working directory, useful for monorepos
          # working-directory: somedir

          # Optional: golangci-lint command line arguments.
          #
          # Note: By default, the `.golangci.yml` file should be at the root of the repository.
          # The location of the configuration file can be changed by using `--config=`
          # args: --timeout=30m --config=/my/path/.golangci.yml --issues-exit-code=0

          # Optional: show only new issues if it's a pull request. The default value is `false`.
          # only-new-issues: true

          # Optional: if set to true, then all caching functionality will be completely disabled,
          # takes precedence over all other caching options.
          # skip-cache: true

          # Optional: if set to true, then the action won't cache or restore ~/go/pkg.
          # skip-pkg-cache: true

          # Optional: if set to true, then the action won't cache or restore ~/.cache/go-build.
          # skip-build-cache: true

          # Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'.
          # install-mode: "goinstall"
.github/workflows/test.yml (vendored, 11 lines changed)

@@ -15,7 +15,16 @@ jobs:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Build Docker Image
        env:
          DOCKER_BUILDKIT: '1'
        run: docker build . -t offen/docker-volume-backup:test

      - name: Run Tests
        working-directory: ./test
        run: |
          BUILD_IMAGE=1 ./test.sh
          # Stop the buildx container so the tests can make assertions
          # about the number of running containers
          docker rm -f $(docker ps -aq)
          export GPG_TTY=$(tty)
          ./test.sh test
@@ -1,8 +0,0 @@
linters:
  # Enable specific linter
  # https://golangci-lint.run/usage/linters/#enabled-by-default
  enable:
    - staticcheck
    - govet
output:
  format: github-actions
@@ -1,7 +1,7 @@
# Copyright 2021 - Offen Authors <hioffen@posteo.de>
# SPDX-License-Identifier: MPL-2.0

FROM golang:1.21-alpine as builder
FROM golang:1.20-alpine as builder

WORKDIR /app
COPY . .

@@ -13,7 +13,7 @@ FROM alpine:3.18

WORKDIR /root

RUN apk add --no-cache ca-certificates
RUN apk add --no-cache ca-certificates busybox-extras

COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
COPY --chmod=755 ./entrypoint.sh /root/
README.md (125 lines changed)

@@ -4,10 +4,10 @@

# docker-volume-backup

Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage, Dropbox or SSH compatible storage.
Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage or SSH compatible storage.

The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) sidecar container to an existing Docker setup.
It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV, Azure Blob Storage, Dropbox or SSH compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV, Azure Blob Storage or SSH compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.

<!-- MarkdownTOC -->

@@ -36,7 +36,6 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
  - [Define different retention schedules](#define-different-retention-schedules)
  - [Use special characters in notification URLs](#use-special-characters-in-notification-urls)
  - [Handle file uploads using third party tools](#handle-file-uploads-using-third-party-tools)
  - [Setup Dropbox storage backend](#setup-dropbox-storage-backend)
- [Recipes](#recipes)
  - [Backing up to AWS S3](#backing-up-to-aws-s3)
  - [Backing up to Filebase](#backing-up-to-filebase)
@@ -45,7 +44,6 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
  - [Backing up to WebDAV](#backing-up-to-webdav)
  - [Backing up to SSH](#backing-up-to-ssh)
  - [Backing up to Azure Blob Storage](#backing-up-to-azure-blob-storage)
  - [Backing up to Dropbox](#backing-up-to-dropbox)
  - [Backing up locally](#backing-up-locally)
  - [Backing up to AWS S3 as well as locally](#backing-up-to-aws-s3-as-well-as-locally)
  - [Running on a custom cron schedule](#running-on-a-custom-cron-schedule)
@@ -139,9 +137,6 @@ Documentation references Docker Hub, but all examples will work using ghcr.io ju
## Configuration reference

Backup targets, schedule and retention are configured in environment variables.

Note: You can use any environment variable from below also with a `_FILE` suffix to be able to load the value from a file. This is usually useful when using [Docker Secrets](https://docs.docker.com/engine/swarm/secrets/) or similar.

You can populate below template according to your requirements and use it as your `env_file`:

```ini
@@ -153,29 +148,13 @@ You can populate below template according to your requirements and use it as you

# BACKUP_CRON_EXPRESSION="0 2 * * *"

# The compression algorithm used in conjunction with tar.
# Valid options are: "gz" (Gzip) and "zst" (Zstd).
# Note that the selection affects the file extension.

# BACKUP_COMPRESSION="gz"

# Parallelism level for "gz" (Gzip) compression.
# Defines how many blocks of data are concurrently processed.
# Higher values result in faster compression. No effect on decompression
# Default = 1. Setting this to 0 will use all available threads.

# GZIP_PARALLELISM=1

# The name of the backup file including the extension.
# The name of the backup file including the `.tar.gz` extension.
# Format verbs will be replaced as in `strftime`. Omitting them
# will result in the same filename for every backup run, which means previous
# versions will be overwritten on subsequent runs.
# Extension can be defined literally or via "{{ .Extension }}" template,
# in which case it will become either "tar.gz" or "tar.zst" (depending
# on your BACKUP_COMPRESSION setting).
# The default results in filenames like: `backup-2021-08-29T04-00-00.tar.gz`.
# versions will be overwritten on subsequent runs. The default results
# in filenames like `backup-2021-08-29T04-00-00.tar.gz`.

# BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.{{ .Extension }}"
# BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.tar.gz"

# Setting BACKUP_FILENAME_EXPAND to true allows for environment variable
# placeholders in BACKUP_FILENAME, BACKUP_LATEST_SYMLINK and in
@@ -215,15 +194,6 @@ You can populate below template according to your requirements and use it as you

# BACKUP_EXCLUDE_REGEXP="\.log$"

# Exclude one or many storage backends from the pruning process.
# E.g. with one backend excluded: BACKUP_SKIP_BACKENDS_FROM_PRUNE=s3
# E.g. with multiple backends excluded: BACKUP_SKIP_BACKENDS_FROM_PRUNE=s3,webdav
# Available backends are: S3, WebDAV, SSH, Local, Dropbox, Azure
# Note: The name of the backends is case insensitive.
# Default: All backends get pruned.

# BACKUP_SKIP_BACKENDS_FROM_PRUNE=

########### BACKUP STORAGE

# The name of the remote bucket that should be used for storing backups. If
@@ -243,6 +213,14 @@ You can populate below template according to your requirements and use it as you
# AWS_ACCESS_KEY_ID="<xxx>"
# AWS_SECRET_ACCESS_KEY="<xxx>"

# It is possible to provide the keys in files, allowing to hide the sensitive data.
# These values have a higher priority than the ones above, meaning if both are set
# the values from the files will be used.
# This option is most useful with Docker [secrets](https://docs.docker.com/engine/swarm/secrets/).

# AWS_ACCESS_KEY_ID_FILE="/path/to/file"
# AWS_SECRET_ACCESS_KEY_FILE="/path/to/file"

# Instead of providing static credentials, you can also use IAM instance profiles
# or similar to provide authentication. Some possible configuration options on AWS:
# - EC2: http://169.254.169.254
@@ -369,26 +347,6 @@ You can populate below template according to your requirements and use it as you

# AZURE_STORAGE_ENDPOINT="https://{{ .AccountName }}.blob.core.windows.net/"

# Absolute remote path in your Dropbox where the backups shall be stored.
# Note: Use your app's subpath in Dropbox, if it doesn't have global access.
# Consult the README for further information.

# DROPBOX_REMOTE_PATH="/my/directory"

# Number of concurrent chunked uploads for Dropbox.
# Values above 6 usually result in no enhancements.

# DROPBOX_CONCURRENCY_LEVEL="6"

# App key and app secret from your app created at https://www.dropbox.com/developers/apps/info

# DROPBOX_APP_KEY=""
# DROPBOX_APP_SECRET=""

# Refresh token to request new short-lived tokens (OAuth2). Consult README to see how to get one.

# DROPBOX_REFRESH_TOKEN=""

# In addition to storing backups remotely, you can also keep local copies.
# Pass a container-local path to store your backups if needed. You also need to
# mount a local folder or Docker volume into that location (`/archive`
@@ -1053,37 +1011,6 @@ volumes:

Commands will be invoked with the filepath of the tar archive passed as `COMMAND_RUNTIME_BACKUP_FILEPATH`.

### Setup Dropbox storage backend

#### Auth-Setup:

1. Create a new Dropbox App in the [App Console](https://www.dropbox.com/developers/apps)
2. Open your new Dropbox App and set `DROPBOX_APP_KEY` and `DROPBOX_APP_SECRET` in your environment (e.g. docker-compose.yml) accordingly
3. Click on `Permissions` in your app and make sure that the following permissions are granted (or more):
   - `files.metadata.write`
   - `files.metadata.read`
   - `files.content.write`
   - `files.content.read`
4. Replace APPKEY in `https://www.dropbox.com/oauth2/authorize?client_id=APPKEY&token_access_type=offline&response_type=code` with the app key from step 2
5. Visit the URL and confirm the access of your app. This gives you an `auth code` -> save it somewhere!
6. Replace AUTHCODE, APPKEY, APPSECRET accordingly and perform the request:
```
curl https://api.dropbox.com/oauth2/token \
-d code=AUTHCODE \
-d grant_type=authorization_code \
-d client_id=APPKEY \
-d client_secret=APPSECRET
```
7. Execute the request. You will get a JSON formatted reply. Use the value of the `refresh_token` for the last environment variable `DROPBOX_REFRESH_TOKEN`
8. You should now have `DROPBOX_APP_KEY`, `DROPBOX_APP_SECRET` and `DROPBOX_REFRESH_TOKEN` set. These don't expire.

Note: Using the "Generated access token" in the app console is not supported, as it is only very short lived and therefore not suitable for an automatic backup solution. The refresh token handles this automatically - the setup procedure above is only needed once.

#### Other parameters

Important: If you chose `App folder` access during the creation of your Dropbox app in step 1 above, you can only write in the app's directory!
This means that `DROPBOX_REMOTE_PATH` must start with e.g. `/Apps/YOUR_APP_NAME` or `/Apps/YOUR_APP_NAME/some_sub_dir`

## Recipes

This section lists configuration for some real-world use cases that you can mix and match according to your needs.
@@ -1251,30 +1178,6 @@ volumes:
  data:
```

### Backing up to Dropbox

See [Dropbox Setup](#setup-dropbox-storage-backend) on how to get the appropriate environment values.

```yml
version: '3'

services:
  # ... define other services using the `data` volume here
  backup:
    image: offen/docker-volume-backup:v2
    environment:
      DROPBOX_REFRESH_TOKEN: REFRESH_KEY # replace
      DROPBOX_APP_KEY: APP_KEY # replace
      DROPBOX_APP_SECRET: APP_SECRET # replace
      DROPBOX_REMOTE_PATH: /Apps/my-test-app/some_subdir # replace
    volumes:
      - data:/backup/my-app-backup:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro

volumes:
  data:
```

### Backing up locally

```yml
@@ -8,20 +8,16 @@ package main

import (
    "archive/tar"
    "compress/gzip"
    "fmt"
    "io"
    "os"
    "path"
    "path/filepath"
    "runtime"
    "strings"

    "github.com/klauspost/pgzip"

    "github.com/klauspost/compress/zstd"
)

func createArchive(files []string, inputFilePath, outputFilePath string, compression string, compressionConcurrency int) error {
func createArchive(files []string, inputFilePath, outputFilePath string) error {
    inputFilePath = stripTrailingSlashes(inputFilePath)
    inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
    if err != nil {
@@ -31,7 +27,7 @@ func createArchive(files []string, inputFilePath, outputFilePath string, compres
        return fmt.Errorf("createArchive: error creating output file path: %w", err)
    }

    if err := compress(files, outputFilePath, filepath.Dir(inputFilePath), compression, compressionConcurrency); err != nil {
    if err := compress(files, outputFilePath, filepath.Dir(inputFilePath)); err != nil {
        return fmt.Errorf("createArchive: error creating archive: %w", err)
    }

@@ -55,21 +51,18 @@ func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error)
    return inputFilePath, outputFilePath, err
}

func compress(paths []string, outFilePath, subPath string, algo string, concurrency int) error {
func compress(paths []string, outFilePath, subPath string) error {
    file, err := os.Create(outFilePath)
    if err != nil {
        return fmt.Errorf("compress: error creating out file: %w", err)
    }

    prefix := path.Dir(outFilePath)
    compressWriter, err := getCompressionWriter(file, algo, concurrency)
    if err != nil {
        return fmt.Errorf("compress: error getting compression writer: %w", err)
    }
    tarWriter := tar.NewWriter(compressWriter)
    gzipWriter := gzip.NewWriter(file)
    tarWriter := tar.NewWriter(gzipWriter)

    for _, p := range paths {
        if err := writeTarball(p, tarWriter, prefix); err != nil {
        if err := writeTarGz(p, tarWriter, prefix); err != nil {
            return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
        }
    }
@@ -79,9 +72,9 @@ func compress(paths []string, outFilePath, subPath string, algo string, concurre
        return fmt.Errorf("compress: error closing tar writer: %w", err)
    }

    err = compressWriter.Close()
    err = gzipWriter.Close()
    if err != nil {
        return fmt.Errorf("compress: error closing compression writer: %w", err)
        return fmt.Errorf("compress: error closing gzip writer: %w", err)
    }

    err = file.Close()
@@ -92,38 +85,10 @@ func compress(paths []string, outFilePath, subPath string, algo string, concurre
    return nil
}

func getCompressionWriter(file *os.File, algo string, concurrency int) (io.WriteCloser, error) {
    switch algo {
    case "gz":
        w, err := pgzip.NewWriterLevel(file, 5)
        if err != nil {
            return nil, fmt.Errorf("getCompressionWriter: gzip error: %w", err)
        }

        if concurrency == 0 {
            concurrency = runtime.GOMAXPROCS(0)
        }

        if err := w.SetConcurrency(1<<20, concurrency); err != nil {
            return nil, fmt.Errorf("getCompressionWriter: error setting concurrency: %w", err)
        }

        return w, nil
    case "zst":
        compressWriter, err := zstd.NewWriter(file)
        if err != nil {
            return nil, fmt.Errorf("getCompressionWriter: zstd error: %w", err)
        }
        return compressWriter, nil
    default:
        return nil, fmt.Errorf("getCompressionWriter: unsupported compression algorithm: %s", algo)
    }
}

func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
    fileInfo, err := os.Lstat(path)
    if err != nil {
        return fmt.Errorf("writeTarball: error getting file infor for %s: %w", path, err)
        return fmt.Errorf("writeTarGz: error getting file infor for %s: %w", path, err)
    }

    if fileInfo.Mode()&os.ModeSocket == os.ModeSocket {
@@ -134,19 +99,19 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
    if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
        var err error
        if link, err = os.Readlink(path); err != nil {
            return fmt.Errorf("writeTarball: error resolving symlink %s: %w", path, err)
            return fmt.Errorf("writeTarGz: error resolving symlink %s: %w", path, err)
        }
    }

    header, err := tar.FileInfoHeader(fileInfo, link)
    if err != nil {
        return fmt.Errorf("writeTarball: error getting file info header: %w", err)
        return fmt.Errorf("writeTarGz: error getting file info header: %w", err)
    }
    header.Name = strings.TrimPrefix(path, prefix)

    err = tarWriter.WriteHeader(header)
    if err != nil {
        return fmt.Errorf("writeTarball: error writing file info header: %w", err)
        return fmt.Errorf("writeTarGz: error writing file info header: %w", err)
    }

    if !fileInfo.Mode().IsRegular() {
@@ -155,13 +120,13 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {

    file, err := os.Open(path)
    if err != nil {
        return fmt.Errorf("writeTarball: error opening %s: %w", path, err)
        return fmt.Errorf("writeTarGz: error opening %s: %w", path, err)
    }
    defer file.Close()

    _, err = io.Copy(tarWriter, file)
    if err != nil {
        return fmt.Errorf("writeTarball: error copying %s to tar writer: %w", path, err)
        return fmt.Errorf("writeTarGz: error copying %s to tar writer: %w", path, err)
    }

    return nil
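The hunks above swap a hard-coded `gzip.NewWriter` for a writer returned by `getCompressionWriter`, so the tar stream can be layered on either a pgzip or a zstd writer depending on `BACKUP_COMPRESSION`. As a rough, self-contained sketch of that layering (not the project's code; the file names and the single-file scope are made up for illustration), using the `github.com/klauspost/compress/zstd` package that already appears in the imports:

```go
package main

import (
	"archive/tar"
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

// writeTarZst archives a single file into dst, mirroring the
// tar-writer-on-top-of-compression-writer layering shown in the diff.
func writeTarZst(src, dst string) error {
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	// Compression writer, as getCompressionWriter does for the "zst" case.
	enc, err := zstd.NewWriter(out)
	if err != nil {
		return err
	}
	tw := tar.NewWriter(enc) // tar writer sits on top of the compressor

	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		return err
	}
	hdr, err := tar.FileInfoHeader(info, "")
	if err != nil {
		return err
	}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	if _, err := io.Copy(tw, f); err != nil {
		return err
	}

	// Close in reverse order: tar stream first, then the compressor.
	if err := tw.Close(); err != nil {
		return err
	}
	return enc.Close()
}

func main() {
	if err := writeTarZst("example.txt", "out.tar.zst"); err != nil {
		os.Exit(1)
	}
}
```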
@@ -7,9 +7,9 @@ import (
    "crypto/x509"
    "encoding/pem"
    "fmt"
    "io/ioutil"
    "os"
    "regexp"
    "strconv"
    "time"
)

@@ -24,13 +24,13 @@ type Config struct {
    AwsEndpointCACert CertDecoder `envconfig:"AWS_ENDPOINT_CA_CERT"`
    AwsStorageClass string `split_words:"true"`
    AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"`
    AwsAccessKeyIDFile string `envconfig:"AWS_ACCESS_KEY_ID_FILE"`
    AwsSecretAccessKey string `split_words:"true"`
    AwsSecretAccessKeyFile string `split_words:"true"`
    AwsIamRoleEndpoint string `split_words:"true"`
    AwsPartSize int64 `split_words:"true"`
    BackupCompression CompressionType `split_words:"true" default:"gz"`
    GzipParallelism WholeNumber `split_words:"true" default:"1"`
    BackupSources string `split_words:"true" default:"/backup"`
    BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.{{ .Extension }}"`
    BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
    BackupFilenameExpand bool `split_words:"true"`
    BackupLatestSymlink string `split_words:"true"`
    BackupArchive string `split_words:"true" default:"/archive"`
@@ -40,7 +40,6 @@ type Config struct {
    BackupStopContainerLabel string `split_words:"true" default:"true"`
    BackupFromSnapshot bool `split_words:"true"`
    BackupExcludeRegexp RegexpDecoder `split_words:"true"`
    BackupSkipBackendsFromPrune []string `split_words:"true"`
    GpgPassphrase string `split_words:"true"`
    NotificationURLs []string `envconfig:"NOTIFICATION_URLS"`
    NotificationLevel string `split_words:"true" default:"error"`
@@ -70,29 +69,17 @@ type Config struct {
    AzureStorageContainerName string `split_words:"true"`
    AzureStoragePath string `split_words:"true"`
    AzureStorageEndpoint string `split_words:"true" default:"https://{{ .AccountName }}.blob.core.windows.net/"`
    DropboxEndpoint string `split_words:"true" default:"https://api.dropbox.com/"`
    DropboxOAuth2Endpoint string `envconfig:"DROPBOX_OAUTH2_ENDPOINT" default:"https://api.dropbox.com/"`
    DropboxRefreshToken string `split_words:"true"`
    DropboxAppKey string `split_words:"true"`
    DropboxAppSecret string `split_words:"true"`
    DropboxRemotePath string `split_words:"true"`
    DropboxConcurrencyLevel NaturalNumber `split_words:"true" default:"6"`
}

type CompressionType string

func (c *CompressionType) Decode(v string) error {
    switch v {
    case "gz", "zst":
        *c = CompressionType(v)
        return nil
    default:
        return fmt.Errorf("config: error decoding compression type %s", v)
func (c *Config) resolveSecret(envVar string, secretPath string) (string, error) {
    if secretPath == "" {
        return envVar, nil
    }
    }
}

func (c *CompressionType) String() string {
    return string(*c)
    data, err := os.ReadFile(secretPath)
    if err != nil {
        return "", fmt.Errorf("resolveSecret: error reading secret path: %w", err)
    }
    return string(data), nil
}

type CertDecoder struct {
@@ -103,7 +90,7 @@ func (c *CertDecoder) Decode(v string) error {
    if v == "" {
        return nil
    }
    content, err := os.ReadFile(v)
    content, err := ioutil.ReadFile(v)
    if err != nil {
        content = []byte(v)
    }
@@ -131,41 +118,3 @@ func (r *RegexpDecoder) Decode(v string) error {
    *r = RegexpDecoder{Re: re}
    return nil
}

// NaturalNumber is a type that can be used to decode a positive, non-zero natural number
type NaturalNumber int

func (n *NaturalNumber) Decode(v string) error {
    asInt, err := strconv.Atoi(v)
    if err != nil {
        return fmt.Errorf("config: error converting %s to int", v)
    }
    if asInt <= 0 {
        return fmt.Errorf("config: expected a natural number, got %d", asInt)
    }
    *n = NaturalNumber(asInt)
    return nil
}

func (n *NaturalNumber) Int() int {
    return int(*n)
}

// WholeNumber is a type that can be used to decode a positive whole number, including zero
type WholeNumber int

func (n *WholeNumber) Decode(v string) error {
    asInt, err := strconv.Atoi(v)
    if err != nil {
        return fmt.Errorf("config: error converting %s to int", v)
    }
    if asInt < 0 {
        return fmt.Errorf("config: expected a whole, positive number, including zero. Got %d", asInt)
    }
    *n = WholeNumber(asInt)
    return nil
}

func (n *WholeNumber) Int() int {
    return int(*n)
}
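The new `resolveSecret` helper above prefers a file-based secret (for example a mounted Docker secret) over the inline environment value. A minimal standalone sketch of the same pattern using only the standard library; the `_FILE` convention matches the README note, but the function and variable names here are illustrative rather than the project's:

```go
package main

import (
	"fmt"
	"os"
)

// lookupSecret returns the contents of the file named by key+"_FILE" when
// that variable is set, and falls back to the plain environment value otherwise.
func lookupSecret(key string) (string, error) {
	if path, ok := os.LookupEnv(key + "_FILE"); ok {
		data, err := os.ReadFile(path)
		if err != nil {
			return "", fmt.Errorf("lookupSecret: error reading %s: %w", path, err)
		}
		return string(data), nil
	}
	return os.Getenv(key), nil
}

func main() {
	accessKey, err := lookupSecret("AWS_ACCESS_KEY_ID")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("resolved a credential of length", len(accessKey))
}
```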
@@ -10,7 +10,7 @@ import (
    "bytes"
    "context"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "strings"

@@ -51,15 +51,19 @@ func (s *script) exec(containerRef string, command string, user string) ([]byte,
        outputDone <- err
    }()

    if <-outputDone != nil {
    select {
    case err := <-outputDone:
        if err != nil {
            return nil, nil, fmt.Errorf("exec: error demultiplexing output: %w", err)
        }
        break
    }

    stdout, err := io.ReadAll(&outBuf)
    stdout, err := ioutil.ReadAll(&outBuf)
    if err != nil {
        return nil, nil, fmt.Errorf("exec: error reading stdout: %w", err)
    }
    stderr, err := io.ReadAll(&errBuf)
    stderr, err := ioutil.ReadAll(&errBuf)
    if err != nil {
        return nil, nil, fmt.Errorf("exec: error reading stderr: %w", err)
    }
@@ -148,15 +152,15 @@ func (s *script) runLabeledCommands(label string) error {
        g.Go(func() error {
            cmd, ok := c.Labels[label]
            if !ok && label == "docker-volume-backup.archive-pre" {
                cmd = c.Labels["docker-volume-backup.exec-pre"]
                cmd, _ = c.Labels["docker-volume-backup.exec-pre"]
            } else if !ok && label == "docker-volume-backup.archive-post" {
                cmd = c.Labels["docker-volume-backup.exec-post"]
                cmd, _ = c.Labels["docker-volume-backup.exec-post"]
            }

            userLabelName := fmt.Sprintf("%s.user", label)
            user := c.Labels[userLabelName]

            s.logger.Info(fmt.Sprintf("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/")))
            s.logger.Infof("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/"))
            stdout, stderr, err := s.exec(c.ID, cmd, user)
            if s.c.ExecForwardOutput {
                os.Stderr.Write(stderr)
@@ -18,7 +18,7 @@ import (
func (s *script) lock(lockfile string) (func() error, error) {
    start := time.Now()
    defer func() {
        s.stats.LockedTime = time.Since(start)
        s.stats.LockedTime = time.Now().Sub(start)
    }()

    retry := time.NewTicker(5 * time.Second)
@@ -41,11 +41,9 @@ func (s *script) lock(lockfile string) (func() error, error) {
    }

    if !s.encounteredLock {
        s.logger.Info(
            fmt.Sprintf(
        s.logger.Infof(
            "Exclusive lock was not available on first attempt. Will retry until it becomes available or the timeout of %s is exceeded.",
            s.c.LockTimeout,
            ),
        )
        s.encounteredLock = true
    }
@@ -4,7 +4,6 @@
package main

import (
    "fmt"
    "os"
)

@@ -15,16 +14,14 @@ func main() {
    }

    unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
    defer s.must(unlock())
    defer unlock()
    s.must(err)

    defer func() {
        if pArg := recover(); pArg != nil {
            if err, ok := pArg.(error); ok {
                if hookErr := s.runHooks(err); hookErr != nil {
                    s.logger.Error(
                        fmt.Sprintf("An error occurred calling the registered hooks: %s", hookErr),
                    )
                    s.logger.Errorf("An error occurred calling the registered hooks: %s", hookErr)
                }
                os.Exit(1)
            }
@@ -32,11 +29,9 @@ func main() {
    }

    if err := s.runHooks(nil); err != nil {
        s.logger.Error(
            fmt.Sprintf(
        s.logger.Errorf(
            "Backup procedure ran successfully, but an error ocurred calling the registered hooks: %v",
            err,
            ),
        )
        os.Exit(1)
    }
@@ -4,30 +4,24 @@
package main

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "io"
    "io/fs"
    "log/slog"
    "os"
    "path"
    "path/filepath"
    "slices"
    "strings"
    "text/template"
    "time"

    "github.com/offen/docker-volume-backup/internal/storage"
    "github.com/offen/docker-volume-backup/internal/storage/azure"
    "github.com/offen/docker-volume-backup/internal/storage/dropbox"
    "github.com/offen/docker-volume-backup/internal/storage/local"
    "github.com/offen/docker-volume-backup/internal/storage/s3"
    "github.com/offen/docker-volume-backup/internal/storage/ssh"
    "github.com/offen/docker-volume-backup/internal/storage/webdav"

    "github.com/ProtonMail/go-crypto/openpgp"
    "github.com/containrrr/shoutrrr"
    "github.com/containrrr/shoutrrr/pkg/router"
    "github.com/docker/docker/api/types"
@@ -35,9 +29,11 @@ import (
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/swarm"
    "github.com/docker/docker/client"
    "github.com/kelseyhightower/envconfig"
    "github.com/leekchan/timeutil"
    "github.com/offen/envconfig"
    "github.com/otiai10/copy"
    "github.com/sirupsen/logrus"
    "golang.org/x/crypto/openpgp"
    "golang.org/x/sync/errgroup"
)

@@ -46,7 +42,7 @@ import (
type script struct {
    cli *client.Client
    storages []storage.Backend
    logger *slog.Logger
    logger *logrus.Logger
    sender *router.ServiceRouter
    template *template.Template
    hooks []hook
@@ -68,7 +64,12 @@ func newScript() (*script, error) {
    stdOut, logBuffer := buffer(os.Stdout)
    s := &script{
        c: &Config{},
        logger: slog.New(slog.NewTextHandler(stdOut, nil)),
        logger: &logrus.Logger{
            Out: stdOut,
            Formatter: new(logrus.TextFormatter),
            Hooks: make(logrus.LevelHooks),
            Level: logrus.InfoLevel,
        },
        stats: &Stats{
            StartTime: time.Now(),
            LogOutput: logBuffer,
@@ -78,7 +79,6 @@ func newScript() (*script, error) {
                "SSH": {},
                "Local": {},
                "Azure": {},
                "Dropbox": {},
            },
        },
    }
@@ -89,47 +89,11 @@ func newScript() (*script, error) {
        return nil
    })

    envconfig.Lookup = func(key string) (string, bool) {
        value, okValue := os.LookupEnv(key)
        location, okFile := os.LookupEnv(key + "_FILE")

        switch {
        case okValue && !okFile: // only value
            return value, true
        case !okValue && okFile: // only file
            contents, err := os.ReadFile(location)
            if err != nil {
                s.must(fmt.Errorf("newScript: failed to read %s! Error: %s", location, err))
                return "", false
            }
            return string(contents), true
        case okValue && okFile: // both
            s.must(fmt.Errorf("newScript: both %s and %s are set!", key, key+"_FILE"))
            return "", false
        default: // neither, ignore
            return "", false
        }
    }

    if err := envconfig.Process("", s.c); err != nil {
        return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err)
    }

    s.file = path.Join("/tmp", s.c.BackupFilename)

    tmplFileName, tErr := template.New("extension").Parse(s.file)
    if tErr != nil {
        return nil, fmt.Errorf("newScript: unable to parse backup file extension template: %w", tErr)
    }

    var bf bytes.Buffer
    if tErr := tmplFileName.Execute(&bf, map[string]string{
        "Extension": fmt.Sprintf("tar.%s", s.c.BackupCompression),
    }); tErr != nil {
        return nil, fmt.Errorf("newScript: error executing backup file extension template: %w", tErr)
    }
    s.file = bf.String()

    if s.c.BackupFilenameExpand {
        s.file = os.ExpandEnv(s.file)
        s.c.BackupLatestSymlink = os.ExpandEnv(s.c.BackupLatestSymlink)
@@ -150,19 +114,28 @@ func newScript() (*script, error) {
    logFunc := func(logType storage.LogLevel, context string, msg string, params ...any) {
        switch logType {
        case storage.LogLevelWarning:
            s.logger.Warn(fmt.Sprintf(msg, params...), "storage", context)
            s.logger.Warnf("["+context+"] "+msg, params...)
        case storage.LogLevelError:
            s.logger.Error(fmt.Sprintf(msg, params...), "storage", context)
            s.logger.Errorf("["+context+"] "+msg, params...)
        case storage.LogLevelInfo:
        default:
            s.logger.Info(fmt.Sprintf(msg, params...), "storage", context)
            s.logger.Infof("["+context+"] "+msg, params...)
        }
    }

    if s.c.AwsS3BucketName != "" {
        accessKeyID, err := s.c.resolveSecret(s.c.AwsAccessKeyID, s.c.AwsAccessKeyIDFile)
        if err != nil {
            return nil, fmt.Errorf("newScript: error resolving AwsAccessKeyID: %w", err)
        }
        secretAccessKey, err := s.c.resolveSecret(s.c.AwsSecretAccessKey, s.c.AwsSecretAccessKeyFile)
        if err != nil {
            return nil, fmt.Errorf("newScript: error resolving AwsSecretAccessKey: %w", err)
        }
        s3Config := s3.Config{
            Endpoint: s.c.AwsEndpoint,
            AccessKeyID: s.c.AwsAccessKeyID,
            SecretAccessKey: s.c.AwsSecretAccessKey,
            AccessKeyID: accessKeyID,
            SecretAccessKey: secretAccessKey,
            IamRoleEndpoint: s.c.AwsIamRoleEndpoint,
            EndpointProto: s.c.AwsEndpointProto,
            EndpointInsecure: s.c.AwsEndpointInsecure,
@@ -173,7 +146,7 @@ func newScript() (*script, error) {
            PartSize: s.c.AwsPartSize,
        }
        if s3Backend, err := s3.NewStorageBackend(s3Config, logFunc); err != nil {
            return nil, fmt.Errorf("newScript: error creating s3 storage backend: %w", err)
            return nil, err
        } else {
            s.storages = append(s.storages, s3Backend)
        }
@@ -188,7 +161,7 @@ func newScript() (*script, error) {
            RemotePath: s.c.WebdavPath,
        }
        if webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc); err != nil {
            return nil, fmt.Errorf("newScript: error creating webdav storage backend: %w", err)
            return nil, err
        } else {
            s.storages = append(s.storages, webdavBackend)
        }
@@ -205,7 +178,7 @@ func newScript() (*script, error) {
            RemotePath: s.c.SSHRemotePath,
        }
        if sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc); err != nil {
            return nil, fmt.Errorf("newScript: error creating ssh storage backend: %w", err)
            return nil, err
        } else {
            s.storages = append(s.storages, sshBackend)
        }
@@ -230,28 +203,11 @@ func newScript() (*script, error) {
        }
        azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
        if err != nil {
            return nil, fmt.Errorf("newScript: error creating azure storage backend: %w", err)
            return nil, err
        }
        s.storages = append(s.storages, azureBackend)
    }

    if s.c.DropboxRefreshToken != "" && s.c.DropboxAppKey != "" && s.c.DropboxAppSecret != "" {
        dropboxConfig := dropbox.Config{
            Endpoint: s.c.DropboxEndpoint,
            OAuth2Endpoint: s.c.DropboxOAuth2Endpoint,
            RefreshToken: s.c.DropboxRefreshToken,
            AppKey: s.c.DropboxAppKey,
            AppSecret: s.c.DropboxAppSecret,
            RemotePath: s.c.DropboxRemotePath,
            ConcurrencyLevel: s.c.DropboxConcurrencyLevel.Int(),
        }
        dropboxBackend, err := dropbox.NewStorageBackend(dropboxConfig, logFunc)
        if err != nil {
            return nil, fmt.Errorf("newScript: error creating dropbox storage backend: %w", err)
        }
        s.storages = append(s.storages, dropboxBackend)
    }

    if s.c.EmailNotificationRecipient != "" {
        emailURL := fmt.Sprintf(
            "smtp://%s:%s@%s:%d/?from=%s&to=%s",
@@ -350,13 +306,11 @@ func (s *script) stopContainers() (func() error, error) {
        return noop, nil
    }

    s.logger.Info(
        fmt.Sprintf(
    s.logger.Infof(
        "Stopping %d container(s) labeled `%s` out of %d running container(s).",
        len(containersToStop),
        containerLabel,
        len(allContainers),
        ),
    )

    var stoppedContainers []types.Container
@@ -428,11 +382,9 @@ func (s *script) stopContainers() (func() error, error) {
            errors.Join(restartErrors...),
        )
    }
    s.logger.Info(
        fmt.Sprintf(
    s.logger.Infof(
        "Restarted %d container(s) and the matching service(s).",
        len(stoppedContainers),
        ),
    )
    return nil
}, stopError
@@ -456,9 +408,7 @@ func (s *script) createArchive() error {
        if err := remove(backupSources); err != nil {
            return fmt.Errorf("createArchive: error removing snapshot: %w", err)
        }
        s.logger.Info(
            fmt.Sprintf("Removed snapshot `%s`.", backupSources),
        )
        s.logger.Infof("Removed snapshot `%s`.", backupSources)
        return nil
    })
    if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
@@ -467,9 +417,7 @@ func (s *script) createArchive() error {
    }); err != nil {
        return fmt.Errorf("createArchive: error creating snapshot: %w", err)
    }
    s.logger.Info(
        fmt.Sprintf("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources),
    )
    s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
}

tarFile := s.file
@@ -477,9 +425,7 @@ func (s *script) createArchive() error {
    if err := remove(tarFile); err != nil {
        return fmt.Errorf("createArchive: error removing tar file: %w", err)
    }
    s.logger.Info(
        fmt.Sprintf("Removed tar file `%s`.", tarFile),
    )
    s.logger.Infof("Removed tar file `%s`.", tarFile)
    return nil
})

@@ -503,13 +449,11 @@ func (s *script) createArchive() error {
    return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
}

if err := createArchive(filesEligibleForBackup, backupSources, tarFile, s.c.BackupCompression.String(), s.c.GzipParallelism.Int()); err != nil {
if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
    return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
}

s.logger.Info(
    fmt.Sprintf("Created backup of `%s` at `%s`.", backupSources, tarFile),
)
s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
return nil
}

@@ -526,9 +470,7 @@ func (s *script) encryptArchive() error {
    if err := remove(gpgFile); err != nil {
        return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
    }
    s.logger.Info(
        fmt.Sprintf("Removed GPG file `%s`.", gpgFile),
    )
    s.logger.Infof("Removed GPG file `%s`.", gpgFile)
    return nil
})

@@ -558,9 +500,7 @@ func (s *script) encryptArchive() error {
}

s.file = gpgFile
s.logger.Info(
    fmt.Sprintf("Encrypted backup using given passphrase, saving as `%s`.", s.file),
)
s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
return nil
}

@@ -607,12 +547,6 @@ func (s *script) pruneBackups() error {
for _, backend := range s.storages {
    b := backend
    eg.Go(func() error {
        if skipPrune(b.Name(), s.c.BackupSkipBackendsFromPrune) {
            s.logger.Info(
                fmt.Sprintf("Skipping pruning for backend `%s`.", b.Name()),
            )
            return nil
        }
        stats, err := b.Prune(deadline, s.c.BackupPruningPrefix)
        if err != nil {
            return err
@@ -638,20 +572,7 @@ func (s *script) pruneBackups() error {
// is non-nil.
func (s *script) must(err error) {
    if err != nil {
        s.logger.Error(
            fmt.Sprintf("Fatal error running backup: %s", err),
        )
        s.logger.Errorf("Fatal error running backup: %s", err)
        panic(err)
    }
}

// skipPrune returns true if the given backend name is contained in the
// list of skipped backends.
func skipPrune(name string, skippedBackends []string) bool {
    return slices.ContainsFunc(
        skippedBackends,
        func(b string) bool {
            return strings.EqualFold(b, name) // ignore case on both sides
        },
    )
}
@@ -25,7 +25,7 @@ Here is a list of all data passed to the template:
* `FullPath`: full path of the backup file (e.g. `/archive/backup-2022-02-11T01-00-00.tar.gz`)
* `Size`: size in bytes of the backup file
* `Storages`: object that holds stats about each storage
  * `Local`, `S3`, `WebDAV`, `Azure`, `Dropbox` or `SSH`:
  * `Local`, `S3`, `WebDAV`, `Azure` or `SSH`:
    * `Total`: total number of backup files
    * `Pruned`: number of backup files that were deleted due to pruning rule
    * `PruneErrors`: number of backup files that were unable to be pruned
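The fields listed above are rendered through Go's `text/template`. A self-contained sketch of what that looks like; the template string and the sample numbers are invented, only the field names (`FullPath`, `Size`, `Storages` with `Total`, `Pruned`, `PruneErrors`) come from the list above:

```go
package main

import (
	"os"
	"text/template"
)

// storageStats mirrors the per-backend counters described in the list above.
type storageStats struct {
	Total, Pruned, PruneErrors int
}

func main() {
	data := map[string]any{
		"FullPath": "/archive/backup-2022-02-11T01-00-00.tar.gz",
		"Size":     1048576,
		"Storages": map[string]storageStats{
			"S3": {Total: 7, Pruned: 2, PruneErrors: 0},
		},
	}

	// An illustrative notification line; not one of the project's built-in templates.
	tmpl := template.Must(template.New("notification").Parse(
		"Uploaded {{ .FullPath }} ({{ .Size }} bytes); S3 now holds {{ .Storages.S3.Total }} backups, {{ .Storages.S3.Pruned }} pruned.\n",
	))
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		os.Exit(1)
	}
}
```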
@@ -22,5 +22,12 @@ else
  done
fi

if [ ! -z "$SERVE_METRICS_PATH" ]; then
  mkdir -p /var/www/html${SERVE_METRICS_PATH}
  echo "ok" > /var/www/html${SERVE_METRICS_PATH}/metrics.txt
  httpd -h /var/www/html -p "${SERVE_METRICS_PORT:-80}"
  echo "Serving metrics on port ${SERVE_METRICS_PORT:-80}."
fi

echo "Starting cron in foreground."
crond -f -d 8
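This is the change the `serve-metr` branch is named for: when `SERVE_METRICS_PATH` is set, a busybox `httpd` (provided by the `busybox-extras` package added in the Dockerfile above) serves a static `metrics.txt` below that path on `SERVE_METRICS_PORT` (default 80). A rough sketch of probing that endpoint from Go; the host, port and path used here are assumptions for illustration:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Assumes SERVE_METRICS_PATH=/metrics and the default port 80 on localhost.
	resp, err := http.Get("http://localhost:80/metrics/metrics.txt")
	if err != nil {
		fmt.Fprintln(os.Stderr, "metrics endpoint unreachable:", err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Fprintln(os.Stderr, "error reading response:", err)
		os.Exit(1)
	}
	fmt.Printf("status=%d body=%q\n", resp.StatusCode, body) // expect "ok\n"
}
```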
go.mod (31 lines changed)

@@ -1,6 +1,6 @@
module github.com/offen/docker-volume-backup

go 1.21
go 1.19

require (
    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
@@ -9,43 +9,33 @@ require (
    github.com/cosiner/argv v0.1.0
    github.com/docker/docker v24.0.5+incompatible
    github.com/gofrs/flock v0.8.1
    github.com/klauspost/compress v1.16.7
    github.com/kelseyhightower/envconfig v1.4.0
    github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
    github.com/minio/minio-go/v7 v7.0.62
    github.com/offen/envconfig v1.5.0
    github.com/minio/minio-go/v7 v7.0.61
    github.com/otiai10/copy v1.11.0
    github.com/pkg/sftp v1.13.6
    github.com/pkg/sftp v1.13.5
    github.com/sirupsen/logrus v1.9.3
    github.com/studio-b12/gowebdav v0.9.0
    golang.org/x/crypto v0.12.0
    golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783
    golang.org/x/crypto v0.11.0
    golang.org/x/sync v0.3.0
)

require (
    github.com/cloudflare/circl v1.3.3 // indirect
    github.com/golang/protobuf v1.5.2 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/protobuf v1.28.1 // indirect
)

require (
    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect
    github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
    github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
    github.com/Microsoft/go-winio v0.5.2 // indirect
    github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95
    github.com/docker/distribution v2.8.2+incompatible // indirect
    github.com/docker/go-connections v0.4.0 // indirect
    github.com/docker/go-units v0.4.0 // indirect
    github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
    github.com/dustin/go-humanize v1.0.1 // indirect
    github.com/fatih/color v1.13.0 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
    github.com/google/uuid v1.3.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/klauspost/compress v1.16.7 // indirect
    github.com/klauspost/cpuid/v2 v2.2.5 // indirect
    github.com/klauspost/pgzip v1.2.6
    github.com/kr/fs v0.1.0 // indirect
    github.com/kylelemons/godebug v1.1.0 // indirect
    github.com/mattn/go-colorable v0.1.13 // indirect
@@ -62,10 +52,9 @@ require (
    github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/rs/xid v1.5.0 // indirect
    github.com/sirupsen/logrus v1.9.3 // indirect
    golang.org/x/net v0.14.0 // indirect
    golang.org/x/sys v0.11.0 // indirect
    golang.org/x/text v0.12.0 // indirect
    golang.org/x/net v0.12.0 // indirect
    golang.org/x/sys v0.10.0 // indirect
    golang.org/x/text v0.11.0 // indirect
    gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
    gopkg.in/ini.v1 v1.67.0 // indirect
    gotest.tools/v3 v3.0.3 // indirect
go.sum (61 lines changed)
@@ -188,7 +188,6 @@ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9Orh
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 h1:nVocQV40OQne5613EeLayJiRAJuKlBGy+m22qWG+WRg=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0/go.mod h1:7QJP7dr2wznCMeqIrhMgWGf7XpAQnVrJqDm9nvV3Cu4=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
@@ -201,8 +200,6 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3
|
||||
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
|
||||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
@@ -220,7 +217,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
@@ -230,8 +226,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
|
||||
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
@@ -253,7 +247,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
||||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY=
|
||||
@@ -262,8 +255,6 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 h1:FT+t0UEDykcor4y3dMVKXIiWJETBpRgERYTGlmMd7HU=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5/go.mod h1:rSS3kM9XMzSQ6pw91Qgd6yB5jdt70N4OdtrAf74As5M=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
@@ -451,6 +442,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
|
||||
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
|
||||
@@ -458,8 +451,6 @@ github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQs
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
|
||||
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||
@@ -499,8 +490,8 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju
|
||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.62 h1:qNYsFZHEzl+NfH8UxW4jpmlKav1qUAgfY30YNRneVhc=
|
||||
github.com/minio/minio-go/v7 v7.0.62/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4=
|
||||
github.com/minio/minio-go/v7 v7.0.61 h1:87c+x8J3jxQ5VUGimV9oHdpjsAvy3fhneEBKuoKEVUI=
|
||||
github.com/minio/minio-go/v7 v7.0.61/go.mod h1:BTu8FcrEw+HidY0zd/0eny43QnVNkXRPXrLXFuQBHXg=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
@@ -527,8 +518,6 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/offen/envconfig v1.5.0 h1:LHL4wYIDVeoGxSDI40MShmWfss3gYUlCdstfSiSq4Fk=
|
||||
github.com/offen/envconfig v1.5.0/go.mod h1:L7ny7R+4JWH3VVnZ+ARHvZysWUiZ2eQcm3L0imU9ACY=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||
@@ -554,7 +543,6 @@ github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod
|
||||
github.com/otiai10/copy v1.11.0 h1:OKBD80J/mLBrwnzXqGtFCzprFSGioo30JcmR4APsNwc=
|
||||
github.com/otiai10/copy v1.11.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww=
|
||||
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
|
||||
github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
@@ -566,8 +554,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||
github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
|
||||
github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
|
||||
github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go=
|
||||
github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
@@ -669,12 +657,11 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -713,7 +700,6 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -772,11 +758,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
|
||||
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
|
||||
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -800,7 +783,6 @@ golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7Lm
|
||||
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk=
|
||||
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -914,20 +896,13 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -938,10 +913,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1007,7 +980,6 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -1071,7 +1043,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
|
||||
@@ -132,7 +132,7 @@ func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*sto
		Pruned: uint(len(matches)),
	}

	if err := b.DoPrune(b.Name(), len(matches), int(totalCount), func() error {
	if err := b.DoPrune(b.Name(), len(matches), int(totalCount), "Azure Blob Storage backup(s)", func() error {
		wg := sync.WaitGroup{}
		wg.Add(len(matches))
		var errs []error

@@ -1,260 +0,0 @@
|
||||
package dropbox
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
|
||||
"github.com/offen/docker-volume-backup/internal/storage"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
type dropboxStorage struct {
|
||||
*storage.StorageBackend
|
||||
client files.Client
|
||||
concurrencyLevel int
|
||||
}
|
||||
|
||||
// Config allows to configure a Dropbox storage backend.
|
||||
type Config struct {
|
||||
Endpoint string
|
||||
OAuth2Endpoint string
|
||||
RefreshToken string
|
||||
AppKey string
|
||||
AppSecret string
|
||||
RemotePath string
|
||||
ConcurrencyLevel int
|
||||
}
|
||||
|
||||
// NewStorageBackend creates and initializes a new Dropbox storage backend.
|
||||
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
|
||||
tokenUrl, _ := url.JoinPath(opts.OAuth2Endpoint, "oauth2/token")
|
||||
|
||||
conf := &oauth2.Config{
|
||||
ClientID: opts.AppKey,
|
||||
ClientSecret: opts.AppSecret,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
TokenURL: tokenUrl,
|
||||
},
|
||||
}
|
||||
|
||||
logFunc(storage.LogLevelInfo, "Dropbox", "Fetching fresh access token for Dropbox storage backend.")
|
||||
tkSource := conf.TokenSource(context.Background(), &oauth2.Token{RefreshToken: opts.RefreshToken})
|
||||
token, err := tkSource.Token()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("(*dropboxStorage).NewStorageBackend: Error refreshing token: %w", err)
|
||||
}
|
||||
|
||||
dbxConfig := dropbox.Config{
|
||||
Token: token.AccessToken,
|
||||
}
|
||||
|
||||
if opts.Endpoint != "https://api.dropbox.com/" {
|
||||
dbxConfig.URLGenerator = func(hostType string, namespace string, route string) string {
|
||||
return fmt.Sprintf("%s/%d/%s/%s", opts.Endpoint, 2, namespace, route)
|
||||
}
|
||||
}
|
||||
|
||||
client := files.New(dbxConfig)
|
||||
|
||||
if opts.ConcurrencyLevel < 1 {
|
||||
logFunc(storage.LogLevelWarning, "Dropbox", "Concurrency level must be at least 1! Using 1 instead of %d.", opts.ConcurrencyLevel)
|
||||
opts.ConcurrencyLevel = 1
|
||||
}
|
||||
|
||||
return &dropboxStorage{
|
||||
StorageBackend: &storage.StorageBackend{
|
||||
DestinationPath: opts.RemotePath,
|
||||
Log: logFunc,
|
||||
},
|
||||
client: client,
|
||||
concurrencyLevel: opts.ConcurrencyLevel,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name returns the name of the storage backend
|
||||
func (b *dropboxStorage) Name() string {
|
||||
return "Dropbox"
|
||||
}
|
||||
|
||||
// Copy copies the given file to the WebDav storage backend.
|
||||
func (b *dropboxStorage) Copy(file string) error {
|
||||
_, name := path.Split(file)
|
||||
|
||||
folderArg := files.NewCreateFolderArg(b.DestinationPath)
|
||||
if _, err := b.client.CreateFolderV2(folderArg); err != nil {
|
||||
switch err := err.(type) {
|
||||
case files.CreateFolderV2APIError:
|
||||
if err.EndpointError.Path.Tag != files.WriteErrorConflict {
|
||||
return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
|
||||
}
|
||||
b.Log(storage.LogLevelInfo, b.Name(), "Destination path '%s' already exists, no new directory required.", b.DestinationPath)
|
||||
default:
|
||||
return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
r, err := os.Open(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("(*dropboxStorage).Copy: Error opening the file to be uploaded: %w", err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
// Start new upload session and get session id
|
||||
|
||||
b.Log(storage.LogLevelInfo, b.Name(), "Starting upload session for backup '%s' at path '%s'.", file, b.DestinationPath)
|
||||
|
||||
var sessionId string
|
||||
uploadSessionStartArg := files.NewUploadSessionStartArg()
|
||||
uploadSessionStartArg.SessionType = &files.UploadSessionType{Tagged: dropbox.Tagged{Tag: files.UploadSessionTypeConcurrent}}
|
||||
if res, err := b.client.UploadSessionStart(uploadSessionStartArg, nil); err != nil {
|
||||
return fmt.Errorf("(*dropboxStorage).Copy: Error starting the upload session: %w", err)
|
||||
} else {
|
||||
sessionId = res.SessionId
|
||||
}
|
||||
|
||||
// Send the file in 148MB chunks (Dropbox API limit is 150MB, concurrent upload requires a multiple of 4MB though)
|
||||
// Last append can be any size <= 150MB with Close=True
|
||||
|
||||
const chunkSize = 148 * 1024 * 1024 // 148MB
|
||||
var offset uint64 = 0
|
||||
var guard = make(chan struct{}, b.concurrencyLevel)
|
||||
var errorChn = make(chan error, b.concurrencyLevel)
|
||||
var EOFChn = make(chan bool, b.concurrencyLevel)
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
|
||||
loop:
|
||||
for {
|
||||
guard <- struct{}{} // limit concurrency
|
||||
select {
|
||||
case err := <-errorChn: // error from goroutine
|
||||
return err
|
||||
case <-EOFChn: // EOF from goroutine
|
||||
wg.Wait() // wait for all goroutines to finish
|
||||
break loop
|
||||
default:
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
wg.Done()
|
||||
<-guard
|
||||
}()
|
||||
wg.Add(1)
|
||||
chunk := make([]byte, chunkSize)
|
||||
|
||||
mu.Lock() // to preserve offset of chunks
|
||||
|
||||
select {
|
||||
case <-EOFChn:
|
||||
EOFChn <- true // put it back for outer loop
|
||||
mu.Unlock()
|
||||
return // already EOF
|
||||
default:
|
||||
}
|
||||
|
||||
bytesRead, err := r.Read(chunk)
|
||||
if err != nil {
|
||||
errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error reading the file to be uploaded: %w", err)
|
||||
mu.Unlock()
|
||||
return
|
||||
}
|
||||
chunk = chunk[:bytesRead]
|
||||
|
||||
uploadSessionAppendArg := files.NewUploadSessionAppendArg(
|
||||
files.NewUploadSessionCursor(sessionId, offset),
|
||||
)
|
||||
isEOF := bytesRead < chunkSize
|
||||
uploadSessionAppendArg.Close = isEOF
|
||||
if isEOF {
|
||||
EOFChn <- true
|
||||
}
|
||||
offset += uint64(bytesRead)
|
||||
|
||||
mu.Unlock()
|
||||
|
||||
if err := b.client.UploadSessionAppendV2(uploadSessionAppendArg, bytes.NewReader(chunk)); err != nil {
|
||||
errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error appending the file to the upload session: %w", err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Finish the upload session, commit the file (no new data added)
|
||||
|
||||
_, err = b.client.UploadSessionFinish(
|
||||
files.NewUploadSessionFinishArg(
|
||||
files.NewUploadSessionCursor(sessionId, 0),
|
||||
files.NewCommitInfo(filepath.Join(b.DestinationPath, name)),
|
||||
), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("(*dropboxStorage).Copy: Error finishing the upload session: %w", err)
|
||||
}
|
||||
|
||||
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' at path '%s'.", file, b.DestinationPath)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Prune rotates away backups according to the configuration and provided deadline for the Dropbox storage backend.
|
||||
func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
|
||||
var entries []files.IsMetadata
|
||||
res, err := b.client.ListFolder(files.NewListFolderArg(b.DestinationPath))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
|
||||
}
|
||||
entries = append(entries, res.Entries...)
|
||||
|
||||
for res.HasMore {
|
||||
res, err = b.client.ListFolderContinue(files.NewListFolderContinueArg(res.Cursor))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
|
||||
}
|
||||
entries = append(entries, res.Entries...)
|
||||
}
|
||||
|
||||
var matches []*files.FileMetadata
|
||||
var lenCandidates int
|
||||
for _, candidate := range entries {
|
||||
switch candidate := candidate.(type) {
|
||||
case *files.FileMetadata:
|
||||
if !strings.HasPrefix(candidate.Name, pruningPrefix) {
|
||||
continue
|
||||
}
|
||||
lenCandidates++
|
||||
if candidate.ServerModified.Before(deadline) {
|
||||
matches = append(matches, candidate)
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
stats := &storage.PruneStats{
|
||||
Total: uint(lenCandidates),
|
||||
Pruned: uint(len(matches)),
|
||||
}
|
||||
|
||||
if err := b.DoPrune(b.Name(), len(matches), lenCandidates, func() error {
|
||||
for _, match := range matches {
|
||||
if _, err := b.client.DeleteV2(files.NewDeleteArg(filepath.Join(b.DestinationPath, match.Name))); err != nil {
|
||||
return fmt.Errorf("(*dropboxStorage).Prune: Error removing file from Dropbox storage: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return stats, err
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
@@ -47,9 +47,9 @@ func (b *localStorage) Copy(file string) error {
	_, name := path.Split(file)

	if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
		return fmt.Errorf("(*localStorage).Copy: Error copying file to archive: %w", err)
		return fmt.Errorf("(*localStorage).Copy: Error copying file to local archive: %w", err)
	}
	b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in `%s`.", file, b.DestinationPath)
	b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)

	if b.latestSymlink != "" {
		symlink := path.Join(b.DestinationPath, b.latestSymlink)
@@ -116,7 +116,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
		Pruned: uint(len(matches)),
	}

	if err := b.DoPrune(b.Name(), len(matches), len(candidates), func() error {
	if err := b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error {
		var removeErrors []error
		for _, match := range matches {
			if err := os.Remove(match); err != nil {
@@ -125,7 +125,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
		}
		if len(removeErrors) != 0 {
			return fmt.Errorf(
				"(*localStorage).Prune: %d error(s) deleting files, starting with: %w",
				"(*localStorage).Prune: %d error(s) deleting local files, starting with: %w",
				len(removeErrors),
				errors.Join(removeErrors...),
			)

@@ -125,12 +125,7 @@ func (b *s3Storage) Copy(file string) error {

	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
		if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
			return fmt.Errorf(
				"(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
				errResp.Message,
				errResp.Code,
				errResp.StatusCode,
			)
			return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
		}
		return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
	}
@@ -153,7 +148,7 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
		lenCandidates++
		if candidate.Err != nil {
			return nil, fmt.Errorf(
				"(*s3Storage).Prune: error looking up candidates from remote storage! %w",
				"(*s3Storage).Prune: Error looking up candidates from remote storage! %w",
				candidate.Err,
			)
		}
@@ -167,7 +162,7 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
		Pruned: uint(len(matches)),
	}

	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, func() error {
	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error {
		objectsCh := make(chan minio.ObjectInfo)
		go func() {
			for _, match := range matches {

@@ -7,6 +7,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
@@ -45,7 +46,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
|
||||
}
|
||||
|
||||
if _, err := os.Stat(opts.IdentityFile); err == nil {
|
||||
key, err := os.ReadFile(opts.IdentityFile)
|
||||
key, err := ioutil.ReadFile(opts.IdentityFile)
|
||||
if err != nil {
|
||||
return nil, errors.New("NewStorageBackend: error reading the private key")
|
||||
}
|
||||
@@ -74,7 +75,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
|
||||
sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("NewStorageBackend: error creating ssh client: %w", err)
|
||||
return nil, fmt.Errorf("NewStorageBackend: Error creating ssh client: %w", err)
|
||||
}
|
||||
_, _, err = sshClient.SendRequest("keepalive", false, nil)
|
||||
if err != nil {
|
||||
@@ -107,13 +108,13 @@ func (b *sshStorage) Copy(file string) error {
|
||||
source, err := os.Open(file)
|
||||
_, name := path.Split(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("(*sshStorage).Copy: error reading the file to be uploaded: %w", err)
|
||||
return fmt.Errorf("(*sshStorage).Copy: Error reading the file to be uploaded: %w", err)
|
||||
}
|
||||
defer source.Close()
|
||||
|
||||
destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
|
||||
if err != nil {
|
||||
return fmt.Errorf("(*sshStorage).Copy: error creating file: %w", err)
|
||||
return fmt.Errorf("(*sshStorage).Copy: Error creating file on SSH storage: %w", err)
|
||||
}
|
||||
defer destination.Close()
|
||||
|
||||
@@ -123,7 +124,7 @@ func (b *sshStorage) Copy(file string) error {
|
||||
if err == io.EOF {
|
||||
tot, err := destination.Write(chunk[:num])
|
||||
if err != nil {
|
||||
return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
|
||||
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
|
||||
}
|
||||
|
||||
if tot != len(chunk[:num]) {
|
||||
@@ -134,12 +135,12 @@ func (b *sshStorage) Copy(file string) error {
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
|
||||
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
|
||||
}
|
||||
|
||||
tot, err := destination.Write(chunk[:num])
|
||||
if err != nil {
|
||||
return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
|
||||
return fmt.Errorf("(*sshStorage).Copy: Error uploading the file to SSH storage: %w", err)
|
||||
}
|
||||
|
||||
if tot != len(chunk[:num]) {
|
||||
@@ -147,7 +148,7 @@ func (b *sshStorage) Copy(file string) error {
|
||||
}
|
||||
}
|
||||
|
||||
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
|
||||
b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -156,7 +157,7 @@ func (b *sshStorage) Copy(file string) error {
|
||||
func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
|
||||
candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("(*sshStorage).Prune: error reading directory: %w", err)
|
||||
return nil, fmt.Errorf("(*sshStorage).Prune: Error reading directory from SSH storage: %w", err)
|
||||
}
|
||||
|
||||
var matches []string
|
||||
@@ -174,10 +175,10 @@ func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.P
|
||||
Pruned: uint(len(matches)),
|
||||
}
|
||||
|
||||
if err := b.DoPrune(b.Name(), len(matches), len(candidates), func() error {
|
||||
if err := b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error {
|
||||
for _, match := range matches {
|
||||
if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
|
||||
return fmt.Errorf("(*sshStorage).Prune: error removing file: %w", err)
|
||||
return fmt.Errorf("(*sshStorage).Prune: Error removing file from SSH storage: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -39,22 +39,23 @@ type PruneStats struct {

// DoPrune holds general control flow that applies to any kind of storage.
// Callers can pass in a thunk that performs the actual deletion of files.
func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, doRemoveFiles func() error) error {
func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
	if lenMatches != 0 && lenMatches != lenCandidates {
		if err := doRemoveFiles(); err != nil {
			return err
		}
		b.Log(LogLevelInfo, context,
			"Pruned %d out of %d backups as their age exceeded the configured retention period of %d days.",
			"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
			lenMatches,
			lenCandidates,
			description,
			b.RetentionDays,
		)
	} else if lenMatches != 0 && lenMatches == lenCandidates {
		b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing backups.", lenMatches)
		b.Log(LogLevelWarning, context, "The current configuration would delete all %d existing %s.", lenMatches, description)
		b.Log(LogLevelWarning, context, "Refusing to do so, please check your configuration.")
	} else {
		b.Log(LogLevelInfo, context, "None of %d existing backups were pruned.", lenCandidates)
		b.Log(LogLevelInfo, context, "None of %d existing %s were pruned.", lenCandidates, description)
	}
	return nil
}

@@ -69,18 +69,18 @@ func (b *webDavStorage) Name() string {
func (b *webDavStorage) Copy(file string) error {
	_, name := path.Split(file)
	if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
		return fmt.Errorf("(*webDavStorage).Copy: error creating directory '%s' on server: %w", b.DestinationPath, err)
		return fmt.Errorf("(*webDavStorage).Copy: Error creating directory '%s' on WebDAV server: %w", b.DestinationPath, err)
	}

	r, err := os.Open(file)
	if err != nil {
		return fmt.Errorf("(*webDavStorage).Copy: error opening the file to be uploaded: %w", err)
		return fmt.Errorf("(*webDavStorage).Copy: Error opening the file to be uploaded: %w", err)
	}

	if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
		return fmt.Errorf("(*webDavStorage).Copy: error uploading the file: %w", err)
		return fmt.Errorf("(*webDavStorage).Copy: Error uploading the file to WebDAV server: %w", err)
	}
	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to '%s' at path '%s'.", file, b.url, b.DestinationPath)
	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to WebDAV URL '%s' at path '%s'.", file, b.url, b.DestinationPath)

	return nil
}
@@ -89,7 +89,7 @@ func (b *webDavStorage) Copy(file string) error {
func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	candidates, err := b.client.ReadDir(b.DestinationPath)
	if err != nil {
		return nil, fmt.Errorf("(*webDavStorage).Prune: error looking up candidates from remote storage: %w", err)
		return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
	}
	var matches []fs.FileInfo
	var lenCandidates int
@@ -108,10 +108,10 @@ func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storag
		Pruned: uint(len(matches)),
	}

	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, func() error {
	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "WebDAV backup(s)", func() error {
		for _, match := range matches {
			if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
				return fmt.Errorf("(*webDavStorage).Prune: error removing file: %w", err)
				return fmt.Errorf("(*webDavStorage).Prune: Error removing file from WebDAV storage: %w", err)
			}
		}
		return nil

@@ -1,13 +0,0 @@
FROM docker:24-dind

RUN apk add \
  coreutils \
  curl \
  gpg \
  jq \
  moreutils \
  tar \
  zstd \
  --no-cache

WORKDIR /code/test
@@ -1,70 +0,0 @@
# Integration Tests

## Running tests

The main entry point for running tests is the `./test.sh` script.
It can be used to run the entire test suite, or just a single test case.

### Run all tests

```sh
./test.sh
```

### Run a single test case

```sh
./test.sh <directory-name>
```

### Configuring a test run

In addition to the match pattern, which can be given as the first positional argument, certain behavior can be changed by setting environment variables:

#### `BUILD_IMAGE`

When set, the test script will build an up-to-date `docker-volume-backup` image from the current state of your source tree, and run the tests against it.

```sh
BUILD_IMAGE=1 ./test.sh
```

The default behavior is not to build an image, and to instead look for a version on your host system.

#### `IMAGE_TAG`

Setting this value lets you run tests against different existing images, so you can compare behavior:

```sh
IMAGE_TAG=v2.30.0 ./test.sh
```

#### `NO_IMAGE_CACHE`

When set, images from remote registries will not be cached and shared between sandbox containers.

```sh
NO_IMAGE_CACHE=1 ./test.sh
```

By default, two local images are created that persist the image data and provide it to containers at runtime.

## Understanding the test setup

The test setup runs each test case in an isolated Docker container, which itself runs an otherwise unused Docker daemon.
This means tests can rely on no one else using that daemon, and can make assertions about the number of running containers and so forth.
As the sandbox container is also expected to be torn down after the test, the scripts do not need to do any clean up of their own.

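Because each test case talks to its own throwaway daemon, a test can make assertions about the daemon's global state. A minimal sketch of that pattern, using the `expect_running_containers` helper from `util.sh` (the `backup` service name and the expected count are placeholders borrowed from the existing cases, not a fixed convention):

```sh
docker compose up -d --quiet-pull
sleep 5

# Trigger a one-off backup run inside the backup container.
docker compose exec backup backup

# Nothing else runs on the sandboxed daemon, so asserting on the total
# number of running containers is a meaningful check here.
expect_running_containers "2"

docker compose down --volumes
```
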
## Anatomy of a test case

The `test.sh` script looks for an executable file called `run.sh` in each directory.
When found, it is executed and signals success by returning a 0 exit code.
Any other exit code is considered a failure and will halt execution of further tests.

There is a `util.sh` file containing a few commonly used helpers, which can be used by adding the following prelude to a new test case:

```sh
cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))
```
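
Putting this together, a brand-new `run.sh` could look roughly like the sketch below. This is an illustration only: `pass`, `fail` and `expect_running_containers` are the helpers provided by `util.sh`, while the compose service names and the `./local/test.tar.gz` archive path are assumptions borrowed from the existing test cases.

```sh
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

docker compose up -d --quiet-pull
sleep 5

# Run a one-off backup against the services defined in docker-compose.yml.
docker compose exec backup backup

expect_running_containers "2"

if [ ! -f ./local/test.tar.gz ]; then
  fail "Could not find expected backup archive."
fi
pass "Found expected backup archive."

docker compose down --volumes
```
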
@@ -2,9 +2,9 @@ version: '3'
|
||||
|
||||
services:
|
||||
storage:
|
||||
image: mcr.microsoft.com/azure-storage/azurite:3.26.0
|
||||
image: mcr.microsoft.com/azure-storage/azurite
|
||||
volumes:
|
||||
- ${DATA_DIR:-./data}:/data
|
||||
- azurite_backup_data:/data
|
||||
command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data
|
||||
healthcheck:
|
||||
test: nc 127.0.0.1 10000 -z
|
||||
@@ -12,9 +12,9 @@ services:
|
||||
retries: 30
|
||||
|
||||
az_cli:
|
||||
image: mcr.microsoft.com/azure-cli:2.51.0
|
||||
image: mcr.microsoft.com/azure-cli
|
||||
volumes:
|
||||
- ${LOCAL_DIR:-./local}:/dump
|
||||
- ./local:/dump
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
@@ -53,4 +53,6 @@ services:
|
||||
- app_data:/var/opt/offen
|
||||
|
||||
volumes:
|
||||
azurite_backup_data:
|
||||
name: azurite_backup_data
|
||||
app_data:
|
||||
|
||||
70
test/azure/run.sh
Executable file → Normal file
@@ -6,81 +6,35 @@ cd "$(dirname "$0")"
|
||||
. ../util.sh
|
||||
current_test=$(basename $(pwd))
|
||||
|
||||
export LOCAL_DIR=$(mktemp -d)
|
||||
export TMP_DIR=$(mktemp -d)
|
||||
export DATA_DIR=$(mktemp -d)
|
||||
|
||||
download_az () {
|
||||
docker compose run --rm az_cli \
|
||||
az storage blob download -f /dump/$1.tar.gz -c test-container -n path/to/backup/$1.tar.gz
|
||||
}
|
||||
|
||||
docker compose up -d --quiet-pull
|
||||
|
||||
docker compose up -d
|
||||
sleep 5
|
||||
|
||||
# A symlink for a known file in the volume is created so the test can check
|
||||
# whether symlinks are preserved on backup.
|
||||
docker compose exec backup backup
|
||||
|
||||
sleep 5
|
||||
|
||||
expect_running_containers "3"
|
||||
|
||||
download_az "test"
|
||||
|
||||
tar -xvf "$LOCAL_DIR/test.tar.gz" -C $TMP_DIR
|
||||
|
||||
if [ ! -f "$TMP_DIR/backup/app_data/offen.db" ]; then
|
||||
fail "Could not find expeced file in untared backup"
|
||||
fi
|
||||
docker compose run --rm az_cli \
|
||||
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
|
||||
tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
|
||||
|
||||
pass "Found relevant files in untared remote backups."
|
||||
rm "$LOCAL_DIR/test.tar.gz"
|
||||
|
||||
# The second part of this test checks if backups get deleted when the retention
|
||||
# is set to 0 days (which it should not as it would mean all backups get deleted)
|
||||
# TODO: find out if we can test actual deletion without having to wait for a day
|
||||
BACKUP_RETENTION_DAYS="0" docker compose up -d
|
||||
sleep 5
|
||||
|
||||
docker compose exec backup backup
|
||||
|
||||
download_az "test"
|
||||
if [ ! -f "$LOCAL_DIR/test.tar.gz" ]; then
|
||||
fail "Remote backup was deleted"
|
||||
fi
|
||||
docker compose run --rm az_cli \
|
||||
az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
|
||||
test -f ./local/test.tar.gz
|
||||
|
||||
pass "Remote backups have not been deleted."
|
||||
|
||||
# The third part of this test checks if old backups get deleted when the retention
|
||||
# is set to 7 days (which it should)
|
||||
|
||||
BACKUP_RETENTION_DAYS="7" docker compose up -d
|
||||
sleep 5
|
||||
|
||||
info "Create first backup with no prune"
|
||||
docker compose exec backup backup
|
||||
|
||||
docker compose run --rm az_cli \
|
||||
az storage blob upload -f /dump/test.tar.gz -c test-container -n path/to/backup/test-old.tar.gz
|
||||
|
||||
docker compose down
|
||||
rm "$LOCAL_DIR/test.tar.gz"
|
||||
|
||||
back_date="$(date "+%Y-%m-%dT%H:%M:%S%z" -d "14 days ago" | rev | cut -c 3- | rev):00"
|
||||
jq --arg back_date "$back_date" '(.collections[] | select(.name=="$BLOBS_COLLECTION$") | .data[] | select(.name=="path/to/backup/test-old.tar.gz") | .properties.creationTime = $back_date)' "$DATA_DIR/__azurite_db_blob__.json" | sponge "$DATA_DIR/__azurite_db_blob__.json"
|
||||
|
||||
docker compose up -d
|
||||
sleep 5
|
||||
|
||||
info "Create second backup and prune"
|
||||
docker compose exec backup backup
|
||||
|
||||
info "Download first backup which should be pruned"
|
||||
download_az "test-old" || true
|
||||
if [ -f "$LOCAL_DIR/test-old.tar.gz" ]; then
|
||||
fail "Backdated file was not deleted"
|
||||
fi
|
||||
download_az "test" || true
|
||||
if [ ! -f "$LOCAL_DIR/test.tar.gz" ]; then
|
||||
fail "Recent file was not found"
|
||||
fi
|
||||
|
||||
pass "Old remote backup has been pruned, new one is still present."
|
||||
docker compose down --volumes
|
||||
|
||||
@@ -12,8 +12,8 @@ services:
|
||||
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server --certs-dir "/certs" --address ":443" /data'
|
||||
volumes:
|
||||
- minio_backup_data:/data
|
||||
- ${CERT_DIR:-.}/minio.crt:/certs/public.crt
|
||||
- ${CERT_DIR:-.}/minio.key:/certs/private.key
|
||||
- ./minio.crt:/certs/public.crt
|
||||
- ./minio.key:/certs/private.key
|
||||
|
||||
backup:
|
||||
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
|
||||
@@ -33,7 +33,7 @@ services:
|
||||
volumes:
|
||||
- app_data:/backup/app_data:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- ${CERT_DIR:-.}/rootCA.crt:/root/minio-rootCA.crt
|
||||
- ./rootCA.crt:/root/minio-rootCA.crt
|
||||
|
||||
offen:
|
||||
image: offen/offen:latest
|
||||
|
||||
24
test/certs/run.sh
Executable file → Normal file
@@ -6,27 +6,25 @@ cd "$(dirname "$0")"
|
||||
. ../util.sh
|
||||
current_test=$(basename $(pwd))
|
||||
|
||||
export CERT_DIR=$(mktemp -d)
|
||||
|
||||
openssl genrsa -des3 -passout pass:test -out "$CERT_DIR/rootCA.key" 4096
|
||||
openssl genrsa -des3 -passout pass:test -out rootCA.key 4096
|
||||
openssl req -passin pass:test \
|
||||
-subj "/C=DE/ST=BE/O=IntegrationTest, Inc." \
|
||||
-x509 -new -key "$CERT_DIR/rootCA.key" -sha256 -days 1 -out "$CERT_DIR/rootCA.crt"
|
||||
-x509 -new -key rootCA.key -sha256 -days 1 -out rootCA.crt
|
||||
|
||||
openssl genrsa -out "$CERT_DIR/minio.key" 4096
|
||||
openssl req -new -sha256 -key "$CERT_DIR/minio.key" \
|
||||
openssl genrsa -out minio.key 4096
|
||||
openssl req -new -sha256 -key minio.key \
|
||||
-subj "/C=DE/ST=BE/O=IntegrationTest, Inc./CN=minio" \
|
||||
-out "$CERT_DIR/minio.csr"
|
||||
-out minio.csr
|
||||
|
||||
openssl x509 -req -passin pass:test \
|
||||
-in "$CERT_DIR/minio.csr" \
|
||||
-CA "$CERT_DIR/rootCA.crt" -CAkey "$CERT_DIR/rootCA.key" -CAcreateserial \
|
||||
-in minio.csr \
|
||||
-CA rootCA.crt -CAkey rootCA.key -CAcreateserial \
|
||||
-extfile san.cnf \
|
||||
-out "$CERT_DIR/minio.crt" -days 1 -sha256
|
||||
-out minio.crt -days 1 -sha256
|
||||
|
||||
openssl x509 -in "$CERT_DIR/minio.crt" -noout -text
|
||||
openssl x509 -in minio.crt -noout -text
|
||||
|
||||
docker compose up -d --quiet-pull
|
||||
docker compose up -d
|
||||
sleep 5
|
||||
|
||||
docker compose exec backup backup
|
||||
@@ -41,3 +39,5 @@ docker run --rm \
|
||||
ash -c 'tar -xvf /minio_data/backup/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
|
||||
|
||||
pass "Found relevant files in untared remote backups."
|
||||
|
||||
docker compose down --volumes
|
||||
|
||||
@@ -13,7 +13,7 @@ docker volume create app_data
|
||||
# correctly. It is not supposed to hold any data.
|
||||
docker volume create empty_data
|
||||
|
||||
docker run -d -q \
|
||||
docker run -d \
|
||||
--name minio \
|
||||
--network test_network \
|
||||
--env MINIO_ROOT_USER=test \
|
||||
@@ -25,7 +25,7 @@ docker run -d -q \
|
||||
|
||||
docker exec minio mkdir -p /data/backup
|
||||
|
||||
docker run -d -q \
|
||||
docker run -d \
|
||||
--name offen \
|
||||
--network test_network \
|
||||
-v app_data:/var/opt/offen/ \
|
||||
@@ -33,7 +33,7 @@ docker run -d -q \
|
||||
|
||||
sleep 10
|
||||
|
||||
docker run --rm -q \
|
||||
docker run --rm \
|
||||
--network test_network \
|
||||
-v app_data:/backup/app_data \
|
||||
-v empty_data:/backup/empty_data \
|
||||
@@ -48,7 +48,7 @@ docker run --rm -q \
|
||||
--entrypoint backup \
|
||||
offen/docker-volume-backup:${TEST_VERSION:-canary}
|
||||
|
||||
docker run --rm -q \
|
||||
docker run --rm \
|
||||
-v backup_data:/data alpine \
|
||||
ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'
|
||||
|
||||
|
||||
1
test/commands/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
local
|
||||
@@ -42,7 +42,7 @@ services:
|
||||
EXEC_LABEL: test
|
||||
EXEC_FORWARD_OUTPUT: "true"
|
||||
volumes:
|
||||
- ${LOCAL_DIR:-./local}:/archive
|
||||
- ./local:/archive
|
||||
- app_data:/backup/data:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
|
||||
|
||||
28
test/commands/run.sh
Executable file → Normal file
@@ -6,37 +6,36 @@ cd $(dirname $0)
|
||||
. ../util.sh
|
||||
current_test=$(basename $(pwd))
|
||||
|
||||
export LOCAL_DIR=$(mktemp -d)
|
||||
export TMP_DIR=$(mktemp -d)
|
||||
mkdir -p ./local
|
||||
|
||||
docker compose up -d --quiet-pull
|
||||
docker compose up -d
|
||||
sleep 30 # mariadb likes to take a bit before responding
|
||||
|
||||
docker compose exec backup backup
|
||||
|
||||
tar -xvf "$LOCAL_DIR/test.tar.gz" -C $TMP_DIR
|
||||
if [ ! -f "$TMP_DIR/backup/data/dump.sql" ]; then
|
||||
tar -xvf ./local/test.tar.gz
|
||||
if [ ! -f ./backup/data/dump.sql ]; then
|
||||
fail "Could not find file written by pre command."
|
||||
fi
|
||||
pass "Found expected file."
|
||||
|
||||
if [ -f "$TMP_DIR/backup/data/not-relevant.txt" ]; then
|
||||
if [ -f ./backup/data/not-relevant.txt ]; then
|
||||
fail "Command ran for container with other label."
|
||||
fi
|
||||
pass "Command did not run for container with other label."
|
||||
|
||||
if [ -f "$TMP_DIR/backup/data/post.txt" ]; then
|
||||
if [ -f ./backup/data/post.txt ]; then
|
||||
fail "File created in post command was present in backup."
|
||||
fi
|
||||
pass "Did not find unexpected file."
|
||||
|
||||
docker compose down --volumes
|
||||
sudo rm -rf ./local
|
||||
|
||||
|
||||
info "Running commands test in swarm mode next."
|
||||
|
||||
export LOCAL_DIR=$(mktemp -d)
|
||||
export TMP_DIR=$(mktemp -d)
|
||||
|
||||
mkdir -p ./local
|
||||
docker swarm init
|
||||
|
||||
docker stack deploy --compose-file=docker-compose.yml test_stack
|
||||
@@ -50,13 +49,16 @@ sleep 20
|
||||
|
||||
docker exec $(docker ps -q -f name=backup) backup
|
||||
|
||||
tar -xvf "$LOCAL_DIR/test.tar.gz" -C $TMP_DIR
|
||||
if [ ! -f "$TMP_DIR/backup/data/dump.sql" ]; then
|
||||
tar -xvf ./local/test.tar.gz
|
||||
if [ ! -f ./backup/data/dump.sql ]; then
|
||||
fail "Could not find file written by pre command."
|
||||
fi
|
||||
pass "Found expected file."
|
||||
|
||||
if [ -f "$TMP_DIR/backup/data/post.txt" ]; then
|
||||
if [ -f ./backup/data/post.txt ]; then
|
||||
fail "File created in post command was present in backup."
|
||||
fi
|
||||
pass "Did not find unexpected file."
|
||||
|
||||
docker stack rm test_stack
|
||||
docker swarm leave --force
|
||||
|
||||
1
test/confd/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
local
|
||||
@@ -5,7 +5,7 @@ services:
|
||||
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
|
||||
restart: always
|
||||
volumes:
|
||||
- ${LOCAL_DIR:-./local}:/archive
|
||||
- ./local:/archive
|
||||
- app_data:/backup/app_data:ro
|
||||
- ./01backup.env:/etc/dockervolumebackup/conf.d/01backup.env
|
||||
- ./02backup.env:/etc/dockervolumebackup/conf.d/02backup.env
|
||||
|
||||
@@ -6,24 +6,26 @@ cd $(dirname $0)
|
||||
. ../util.sh
|
||||
current_test=$(basename $(pwd))
|
||||
|
||||
export LOCAL_DIR=$(mktemp -d)
|
||||
mkdir -p local
|
||||
|
||||
docker compose up -d --quiet-pull
|
||||
docker compose up -d
|
||||
|
||||
# sleep until a backup is guaranteed to have happened on the 1 minute schedule
|
||||
sleep 100
|
||||
|
||||
if [ ! -f "$LOCAL_DIR/conf.tar.gz" ]; then
|
||||
docker compose down --volumes
|
||||
|
||||
if [ ! -f ./local/conf.tar.gz ]; then
|
||||
fail "Config from file was not used."
|
||||
fi
|
||||
pass "Config from file was used."
|
||||
|
||||
if [ ! -f "$LOCAL_DIR/other.tar.gz" ]; then
|
||||
if [ ! -f ./local/other.tar.gz ]; then
|
||||
fail "Run on same schedule did not succeed."
|
||||
fi
|
||||
pass "Run on same schedule succeeded."
|
||||
|
||||
if [ -f "$LOCAL_DIR/never.tar.gz" ]; then
|
||||
if [ -f ./local/never.tar.gz ]; then
|
||||
fail "Unexpected file was found."
|
||||
fi
|
||||
pass "Unexpected cron did not run."
|
||||
|
||||
1
test/dropbox/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
user_v2_ready.yaml
|
||||
@@ -1,57 +0,0 @@
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
openapi_mock:
|
||||
image: muonsoft/openapi-mock:0.3.9
|
||||
environment:
|
||||
OPENAPI_MOCK_USE_EXAMPLES: if_present
|
||||
OPENAPI_MOCK_SPECIFICATION_URL: '/etc/openapi/user_v2.yaml'
|
||||
ports:
|
||||
- 8080:8080
|
||||
volumes:
|
||||
- ${SPEC_FILE:-./user_v2.yaml}:/etc/openapi/user_v2.yaml
|
||||
|
||||
oauth2_mock:
|
||||
image: ghcr.io/navikt/mock-oauth2-server:1.0.0
|
||||
ports:
|
||||
- 8090:8090
|
||||
environment:
|
||||
PORT: 8090
|
||||
JSON_CONFIG_PATH: '/etc/oauth2/config.json'
|
||||
volumes:
|
||||
- ./oauth2_config.json:/etc/oauth2/config.json
|
||||
|
||||
backup:
|
||||
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
|
||||
hostname: hostnametoken
|
||||
depends_on:
|
||||
- openapi_mock
|
||||
- oauth2_mock
|
||||
restart: always
|
||||
environment:
|
||||
BACKUP_FILENAME_EXPAND: 'true'
|
||||
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
|
||||
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
|
||||
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
|
||||
BACKUP_PRUNING_LEEWAY: 5s
|
||||
BACKUP_PRUNING_PREFIX: test
|
||||
DROPBOX_ENDPOINT: http://openapi_mock:8080
|
||||
DROPBOX_OAUTH2_ENDPOINT: http://oauth2_mock:8090
|
||||
DROPBOX_REFRESH_TOKEN: test
|
||||
DROPBOX_APP_KEY: test
|
||||
DROPBOX_APP_SECRET: test
|
||||
DROPBOX_REMOTE_PATH: /test
|
||||
DROPBOX_CONCURRENCY_LEVEL: 6
|
||||
volumes:
|
||||
- app_data:/backup/app_data:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
|
||||
offen:
|
||||
image: offen/offen:latest
|
||||
labels:
|
||||
- docker-volume-backup.stop-during-backup=true
|
||||
volumes:
|
||||
- app_data:/var/opt/offen
|
||||
|
||||
volumes:
|
||||
app_data:
|
||||
@@ -1,37 +0,0 @@
|
||||
{
|
||||
"interactiveLogin": true,
|
||||
"httpServer": "NettyWrapper",
|
||||
"tokenCallbacks": [
|
||||
{
|
||||
"issuerId": "issuer1",
|
||||
"tokenExpiry": 120,
|
||||
"requestMappings": [
|
||||
{
|
||||
"requestParam": "scope",
|
||||
"match": "scope1",
|
||||
"claims": {
|
||||
"sub": "subByScope",
|
||||
"aud": [
|
||||
"audByScope"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"issuerId": "issuer2",
|
||||
"requestMappings": [
|
||||
{
|
||||
"requestParam": "someparam",
|
||||
"match": "somevalue",
|
||||
"claims": {
|
||||
"sub": "subBySomeParam",
|
||||
"aud": [
|
||||
"audBySomeParam"
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
. ../util.sh
|
||||
current_test=$(basename $(pwd))
|
||||
|
||||
export SPEC_FILE=$(mktemp -d)/user_v2.yaml
|
||||
cp user_v2.yaml $SPEC_FILE
|
||||
sed -i 's/SERVER_MODIFIED_1/'"$(date "+%Y-%m-%dT%H:%M:%SZ")/g" $SPEC_FILE
|
||||
sed -i 's/SERVER_MODIFIED_2/'"$(date "+%Y-%m-%dT%H:%M:%SZ" -d "14 days ago")/g" $SPEC_FILE
|
||||
|
||||
docker compose up -d --quiet-pull
|
||||
sleep 5
|
||||
|
||||
logs=$(docker compose exec -T backup backup)
|
||||
|
||||
sleep 5
|
||||
|
||||
expect_running_containers "4"
|
||||
|
||||
echo "$logs"
|
||||
if echo "$logs" | grep -q "ERROR"; then
|
||||
fail "Backup failed, errors reported: $logs"
|
||||
else
|
||||
pass "Backup succeeded, no errors reported."
|
||||
fi
|
||||
|
||||
# The second part of this test checks if backups get deleted when the retention
|
||||
# is set to 0 days (which it should not as it would mean all backups get deleted)
|
||||
BACKUP_RETENTION_DAYS="0" docker compose up -d
|
||||
sleep 5
|
||||
|
||||
logs=$(docker compose exec -T backup backup)
|
||||
|
||||
echo "$logs"
|
||||
if echo "$logs" | grep -q "Refusing to do so, please check your configuration"; then
|
||||
pass "Remote backups have not been deleted."
|
||||
else
|
||||
fail "Remote backups would have been deleted: $logs"
|
||||
fi
|
||||
|
||||
# The third part of this test checks if old backups get deleted when the retention
|
||||
# is set to 7 days (which it should)
|
||||
BACKUP_RETENTION_DAYS="7" docker compose up -d
|
||||
sleep 5
|
||||
|
||||
info "Create second backup and prune"
|
||||
logs=$(docker compose exec -T backup backup)
|
||||
|
||||
echo "$logs"
|
||||
if echo "$logs" | grep -q "Pruned 1 out of 2 backups as their age exceeded the configured retention period"; then
|
||||
pass "Old remote backup has been pruned, new one is still present."
|
||||
elif echo "$logs" | grep -q "ERROR"; then
|
||||
fail "Pruning failed, errors reported: $logs"
|
||||
elif echo "$logs" | grep -q "None of 1 existing backups were pruned"; then
|
||||
fail "Pruning failed, old backup has not been pruned: $logs"
|
||||
else
|
||||
fail "Pruning failed, unknown result: $logs"
|
||||
fi
|
||||
File diff suppressed because it is too large
@@ -11,7 +11,7 @@ services:
|
||||
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
|
||||
EXEC_FORWARD_OUTPUT: "true"
|
||||
volumes:
|
||||
- ${LOCAL_DIR:-local}:/local
|
||||
- ./local:/local
|
||||
- app_data:/backup/app_data:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
|
||||
|
||||
8
test/extend/run.sh
Executable file → Normal file
@@ -6,14 +6,14 @@ cd "$(dirname "$0")"
|
||||
. ../util.sh
|
||||
current_test=$(basename $(pwd))
|
||||
|
||||
export LOCAL_DIR=$(mktemp -d)
|
||||
mkdir -p local
|
||||
|
||||
export BASE_VERSION="${TEST_VERSION:-canary}"
|
||||
export TEST_VERSION="${TEST_VERSION:-canary}-with-rsync"
|
||||
|
||||
docker build . -t offen/docker-volume-backup:$TEST_VERSION --build-arg version=$BASE_VERSION
|
||||
|
||||
docker compose up -d --quiet-pull
|
||||
docker compose up -d
|
||||
sleep 5
|
||||
|
||||
docker compose exec backup backup
|
||||
@@ -22,6 +22,8 @@ sleep 5
|
||||
|
||||
expect_running_containers "2"
|
||||
|
||||
if [ ! -f "$LOCAL_DIR/app_data/offen.db" ]; then
|
||||
if [ ! -f "./local/app_data/offen.db" ]; then
|
||||
fail "Could not find expected file in untared archive."
|
||||
fi
|
||||
|
||||
docker compose down --volumes
|
||||
|
||||
1
test/gpg/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
local
|
||||
@@ -11,7 +11,7 @@ services:
|
||||
BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
|
||||
GPG_PASSPHRASE: 1234#$$ecret
|
||||
volumes:
|
||||
- ${LOCAL_DIR:-./local}:/archive
|
||||
- ./local:/archive
|
||||
- app_data:/backup/app_data:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
|
||||
|
||||
@@ -6,27 +6,28 @@ cd "$(dirname "$0")"
|
||||
. ../util.sh
|
||||
current_test=$(basename $(pwd))
|
||||
|
||||
export LOCAL_DIR=$(mktemp -d)
|
||||
mkdir -p local
|
||||
|
||||
docker compose up -d --quiet-pull
|
||||
docker compose up -d
|
||||
sleep 5
|
||||
|
||||
docker compose exec backup backup
|
||||
|
||||
expect_running_containers "2"
|
||||
|
||||
TMP_DIR=$(mktemp -d)
|
||||
tmp_dir=$(mktemp -d)
|
||||
|
||||
echo "1234#\$ecret" | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 "$LOCAL_DIR/test.tar.gz.gpg" > "$LOCAL_DIR/decrypted.tar.gz"
|
||||
tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C $TMP_DIR
|
||||
|
||||
if [ ! -f $TMP_DIR/backup/app_data/offen.db ]; then
|
||||
echo "1234#\$ecret" | gpg -d --pinentry-mode loopback --yes --passphrase-fd 0 ./local/test.tar.gz.gpg > ./local/decrypted.tar.gz
|
||||
tar -xf ./local/decrypted.tar.gz -C $tmp_dir
|
||||
if [ ! -f $tmp_dir/backup/app_data/offen.db ]; then
|
||||
fail "Could not find expected file in untared archive."
|
||||
fi
|
||||
rm "$LOCAL_DIR/decrypted.tar.gz"
|
||||
rm ./local/decrypted.tar.gz
|
||||
|
||||
pass "Found relevant files in decrypted and untared local backup."
|
||||
|
||||
if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.gpg" ]; then
|
||||
if [ ! -L ./local/test-latest.tar.gz.gpg ]; then
|
||||
fail "Could not find local symlink to latest encrypted backup."
|
||||
fi
|
||||
|
||||
docker compose down --volumes
|
||||
|
||||
1
test/ignore/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
local
|
||||
@@ -11,5 +11,5 @@ services:
|
||||
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
|
||||
BACKUP_EXCLUDE_REGEXP: '\.(me|you)$$'
|
||||
volumes:
|
||||
- ${LOCAL_DIR:-./local}:/archive
|
||||
- ./local:/archive
|
||||
- ./sources:/backup/data:ro
|
||||
|
||||
14
test/ignore/run.sh
Executable file → Normal file
@@ -6,21 +6,23 @@ cd $(dirname $0)
|
||||
. ../util.sh
|
||||
current_test=$(basename $(pwd))
|
||||
|
||||
export LOCAL_DIR=$(mktemp -d)
|
||||
mkdir -p local
|
||||
|
||||
docker compose up -d --quiet-pull
|
||||
docker compose up -d
|
||||
sleep 5
|
||||
docker compose exec backup backup
|
||||
|
||||
TMP_DIR=$(mktemp -d)
|
||||
tar --same-owner -xvf "$LOCAL_DIR/test.tar.gz" -C "$TMP_DIR"
|
||||
docker compose down --volumes
|
||||
|
||||
if [ ! -f "$TMP_DIR/backup/data/me.txt" ]; then
|
||||
out=$(mktemp -d)
|
||||
sudo tar --same-owner -xvf ./local/test.tar.gz -C "$out"
|
||||
|
||||
if [ ! -f "$out/backup/data/me.txt" ]; then
|
||||
fail "Expected file was not found."
|
||||
fi
|
||||
pass "Expected file was found."
|
||||
|
||||
if [ -f "$TMP_DIR/backup/data/skip.me" ]; then
|
||||
if [ -f "$out/backup/data/skip.me" ]; then
|
||||
fail "Ignored file was found."
|
||||
fi
|
||||
pass "Ignored file was not found."
|
||||
|
||||
1
test/local/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
local
|
||||
@@ -16,7 +16,7 @@ services:
|
||||
volumes:
|
||||
- app_data:/backup/app_data:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- ${LOCAL_DIR:-./local}:/archive
|
||||
- ./local:/archive
|
||||
|
||||
offen:
|
||||
image: offen/offen:latest
|
||||
|
||||
@@ -6,9 +6,9 @@ cd "$(dirname "$0")"
|
||||
. ../util.sh
|
||||
current_test=$(basename $(pwd))
|
||||
|
||||
export LOCAL_DIR=$(mktemp -d)
|
||||
mkdir -p local
|
||||
|
||||
docker compose up -d --quiet-pull
|
||||
docker compose up -d
|
||||
sleep 5
|
||||
|
||||
# A symlink for a known file in the volume is created so the test can check
|
||||
@@ -21,11 +21,11 @@ sleep 5
|
||||
expect_running_containers "2"
|
||||
|
||||
tmp_dir=$(mktemp -d)
|
||||
tar -xvf "$LOCAL_DIR/test-hostnametoken.tar.gz" -C $tmp_dir
|
||||
tar -xvf ./local/test-hostnametoken.tar.gz -C $tmp_dir
|
||||
if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
|
||||
fail "Could not find expected file in untared archive."
|
||||
fi
|
||||
rm -f "$LOCAL_DIR/test-hostnametoken.tar.gz"
|
||||
rm -f ./local/test-hostnametoken.tar.gz
|
||||
|
||||
if [ ! -L "$tmp_dir/backup/app_data/db.link" ]; then
|
||||
fail "Could not find expected symlink in untared archive."
|
||||
@@ -33,7 +33,7 @@ fi
|
||||
|
||||
pass "Found relevant files in decrypted and untared local backup."
|
||||
|
||||
if [ ! -L "$LOCAL_DIR/test-hostnametoken.latest.tar.gz.gpg" ]; then
|
||||
if [ ! -L ./local/test-hostnametoken.latest.tar.gz.gpg ]; then
|
||||
fail "Could not find symlink to latest version."
|
||||
fi
|
||||
|
||||
@@ -41,36 +41,15 @@ pass "Found symlink to latest version in local backup."
|
||||
|
||||
# The second part of this test checks if backups get deleted when the retention
|
||||
# is set to 0 days (which it should not as it would mean all backups get deleted)
|
||||
# TODO: find out if we can test actual deletion without having to wait for a day
|
||||
BACKUP_RETENTION_DAYS="0" docker compose up -d
|
||||
sleep 5
|
||||
|
||||
docker compose exec backup backup
|
||||
|
||||
if [ "$(find "$LOCAL_DIR" -type f | wc -l)" != "1" ]; then
|
||||
fail "Backups should not have been deleted, instead seen: "$(find "$local_dir" -type f)""
|
||||
if [ "$(find ./local -type f | wc -l)" != "1" ]; then
|
||||
fail "Backups should not have been deleted, instead seen: "$(find ./local -type f)""
|
||||
fi
|
||||
pass "Local backups have not been deleted."
|
||||
|
||||
# The third part of this test checks if old backups get deleted when the retention
|
||||
# is set to 7 days (which it should)
|
||||
|
||||
BACKUP_RETENTION_DAYS="7" docker compose up -d
|
||||
sleep 5
|
||||
|
||||
info "Create first backup with no prune"
|
||||
docker compose exec backup backup
|
||||
|
||||
touch -r "$LOCAL_DIR/test-hostnametoken.tar.gz" -d "14 days ago" "$LOCAL_DIR/test-hostnametoken-old.tar.gz"
|
||||
|
||||
info "Create second backup and prune"
|
||||
docker compose exec backup backup
|
||||
|
||||
if [ -f "$LOCAL_DIR/test-hostnametoken-old.tar.gz" ]; then
|
||||
fail "Backdated file has not been deleted."
|
||||
fi
|
||||
|
||||
if [ ! -f "$LOCAL_DIR/test-hostnametoken.tar.gz" ]; then
|
||||
fail "Recent file has been deleted."
|
||||
fi
|
||||
|
||||
pass "Old remote backup has been pruned, new one is still present."
|
||||
docker compose down --volumes
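
The retention checks above follow one shape for every backend: take a backup, plant a copy backdated beyond the retention window, back up again, and assert that only the backdated file was pruned. A condensed sketch with the local-backend names from this hunk (the full script also sleeps between steps and prints info/pass messages):

    BACKUP_RETENTION_DAYS="7" docker compose up -d
    docker compose exec backup backup
    touch -r "$LOCAL_DIR/test-hostnametoken.tar.gz" -d "14 days ago" "$LOCAL_DIR/test-hostnametoken-old.tar.gz"
    docker compose exec backup backup
    test ! -f "$LOCAL_DIR/test-hostnametoken-old.tar.gz"   # backdated file was pruned
    test -f "$LOCAL_DIR/test-hostnametoken.tar.gz"         # recent file was kept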

1 test/notifications/.gitignore vendored Normal file
@@ -0,0 +1 @@
local

@@ -12,7 +12,7 @@ services:
NOTIFICATION_URLS: ${NOTIFICATION_URLS}
EXTRA_VALUE: extra-value
volumes:
- ${LOCAL_DIR:-./local}:/archive
- ./local:/archive
- app_data:/backup/app_data:ro
- ./notifications.tmpl:/etc/dockervolumebackup/notifications.d/notifications.tmpl

@@ -6,9 +6,9 @@ cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

export LOCAL_DIR=$(mktemp -d)
mkdir -p local

docker compose up -d --quiet-pull
docker compose up -d
sleep 5

GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')
@@ -46,3 +46,5 @@ if [ "$MESSAGE_BODY" != "Backing up /tmp/test.tar.gz succeeded." ]; then
fail "Unexpected notification body $MESSAGE_BODY"
fi
pass "Custom notification body was used."

docker compose down --volumes

1 test/ownership/.gitignore vendored Normal file
@@ -0,0 +1 @@
local

@@ -9,9 +9,9 @@ services:
volumes:
- postgres_data:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: 1FHJMSwt0yhIN1zS7I4DilGUhThBKq0x
POSTGRES_USER: test
POSTGRES_DB: test
- POSTGRES_PASSWORD=1FHJMSwt0yhIN1zS7I4DilGUhThBKq0x
- POSTGRES_USER=test
- POSTGRES_DB=test

backup:
image: offen/docker-volume-backup:${TEST_VERSION}

@@ -21,7 +21,7 @@ services:
volumes:
- postgres_data:/backup/postgres:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- ${LOCAL_DIR:-./local}:/archive
- ./local:/archive

volumes:
postgres_data:

18 test/ownership/run.sh Executable file → Normal file
@@ -7,22 +7,24 @@ cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

export LOCAL_DIR=$(mktemp -d)
mkdir -p local

docker compose up -d --quiet-pull
docker compose up -d
sleep 5

docker compose exec backup backup

TMP_DIR=$(mktemp -d)
tar --same-owner -xvf "$LOCAL_DIR/backup.tar.gz" -C $TMP_DIR
tmp_dir=$(mktemp -d)
sudo tar --same-owner -xvf ./local/backup.tar.gz -C $tmp_dir

find $TMP_DIR/backup/postgres > /dev/null
sudo find $tmp_dir/backup/postgres > /dev/null
pass "Backup contains files at expected location"

for file in $(find $TMP_DIR/backup/postgres); do
if [ "$(stat -c '%u:%g' $file)" != "70:70" ]; then
fail "Unexpected file ownership for $file: $(stat -c '%u:%g' $file)"
for file in $(sudo find $tmp_dir/backup/postgres); do
if [ "$(sudo stat -c '%u:%g' $file)" != "70:70" ]; then
fail "Unexpected file ownership for $file: $(sudo stat -c '%u:%g' $file)"
fi
done
pass "All files and directories in backup preserved their ownership."

docker compose down --volumes

@@ -1,42 +0,0 @@
#!/bin/sh

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

docker network create test_network
docker volume create app_data

LOCAL_DIR=$(mktemp -d)

docker run -d -q \
--name offen \
--network test_network \
-v app_data:/var/opt/offen/ \
offen/offen:latest

sleep 5

docker run --rm -q \
--network test_network \
-v app_data:/backup/app_data \
-v $LOCAL_DIR:/archive \
-v /var/run/docker.sock:/var/run/docker.sock \
--env BACKUP_COMPRESSION=gz \
--env GZIP_PARALLELISM=0 \
--env BACKUP_FILENAME='test.{{ .Extension }}' \
--entrypoint backup \
offen/docker-volume-backup:${TEST_VERSION:-canary}

tmp_dir=$(mktemp -d)
tar -xvf "$LOCAL_DIR/test.tar.gz" -C $tmp_dir
if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
fail "Could not find expected file in untared archive."
fi
pass "Found relevant files in untared local backup."

# This test does not stop containers during backup. This is happening on
# purpose in order to cover this setup as well.
expect_running_containers "1"

@@ -1,50 +0,0 @@
version: '3'

services:
minio:
image: minio/minio:RELEASE.2020-08-04T23-10-51Z
environment:
MINIO_ROOT_USER: test
MINIO_ROOT_PASSWORD: test
MINIO_ACCESS_KEY: test
MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
volumes:
- minio_backup_data:/data

backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
hostname: hostnametoken
depends_on:
- minio
restart: always
environment:
AWS_ACCESS_KEY_ID: test
AWS_SECRET_ACCESS_KEY: GMusLtUmILge2by+z890kQ
AWS_ENDPOINT: minio:9000
AWS_ENDPOINT_PROTO: http
AWS_S3_BUCKET_NAME: backup
BACKUP_FILENAME_EXPAND: 'true'
BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
BACKUP_RETENTION_DAYS: 7
BACKUP_PRUNING_LEEWAY: 5s
BACKUP_PRUNING_PREFIX: test
BACKUP_LATEST_SYMLINK: test-$$HOSTNAME.latest.tar.gz
BACKUP_SKIP_BACKENDS_FROM_PRUNE: 's3'
volumes:
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock
- ./local:/archive

offen:
image: offen/offen:latest
labels:
- docker-volume-backup.stop-during-backup=true
volumes:
- app_data:/var/opt/offen

volumes:
app_data:
minio_backup_data:
name: minio_backup_data

@@ -1,70 +0,0 @@
#!/bin/sh

# Tests prune-skipping with multiple backends (local, s3)
# Pruning itself is tested individually for each storage backend

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

mkdir -p local

docker compose up -d --quiet-pull
sleep 5

docker compose exec backup backup

sleep 5

expect_running_containers "3"

touch -r ./local/test-hostnametoken.tar.gz -d "14 days ago" ./local/test-hostnametoken-old.tar.gz

docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /minio_data/backup/test-hostnametoken-old.tar.gz'

# Skip s3 backend from prune

docker compose up -d
sleep 5

info "Create backup with no prune for s3 backend"
docker compose exec backup backup

info "Check if old backup has been pruned (local)"
test ! -f ./local/test-hostnametoken-old.tar.gz

info "Check if old backup has NOT been pruned (s3)"
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'test -f /minio_data/backup/test-hostnametoken-old.tar.gz'

pass "Old remote backup has been pruned locally, skipped S3 backend is untouched."

# Skip local and s3 backend from prune (all backends)

touch -r ./local/test-hostnametoken.tar.gz -d "14 days ago" ./local/test-hostnametoken-old.tar.gz

docker compose up -d
sleep 5

info "Create backup with no prune for both backends"
docker compose exec -e BACKUP_SKIP_BACKENDS_FROM_PRUNE="s3,local" backup backup

info "Check if old backup has NOT been pruned (local)"
if [ ! -f ./local/test-hostnametoken-old.tar.gz ]; then
fail "Backdated file has not been deleted"
fi

info "Check if old backup has NOT been pruned (s3)"
docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'test -f /minio_data/backup/test-hostnametoken-old.tar.gz'

pass "Skipped all backends while pruning."

@@ -6,9 +6,11 @@ cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

docker compose up -d --quiet-pull
docker compose up -d
sleep 5

# A symlink for a known file in the volume is created so the test can check
# whether symlinks are preserved on backup.
docker compose exec backup backup

sleep 5
@@ -24,6 +26,7 @@ pass "Found relevant files in untared remote backups."

# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5

@@ -36,26 +39,4 @@ docker run --rm \

pass "Remote backups have not been deleted."

# The third part of this test checks if old backups get deleted when the retention
# is set to 7 days (which it should)

BACKUP_RETENTION_DAYS="7" docker compose up -d
sleep 5

info "Create first backup with no prune"
docker compose exec backup backup

docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /minio_data/backup/test-hostnametoken-old.tar.gz'

info "Create second backup and prune"
docker compose exec backup backup

docker run --rm \
-v minio_backup_data:/minio_data \
alpine \
ash -c 'test ! -f /minio_data/backup/test-hostnametoken-old.tar.gz && test -f /minio_data/backup/test-hostnametoken.tar.gz'

pass "Old remote backup has been pruned, new one is still present."
docker compose down --volumes

@@ -31,12 +31,14 @@ pass "Found relevant files in untared backup."
sleep 5
expect_running_containers "5"

docker exec -e AWS_ACCESS_KEY_ID=test $(docker ps -q -f name=backup) backup \
&& fail "Backup should have failed due to duplicate env variables."
docker stack rm test_stack

pass "Backup failed due to duplicate env variables."
docker secret rm minio_root_password
docker secret rm minio_root_user

docker exec -e AWS_ACCESS_KEY_ID_FILE=/tmp/nonexistant $(docker ps -q -f name=backup) backup \
&& fail "Backup should have failed due to non existing file env variable."
docker swarm leave --force

pass "Backup failed due to non existing file env variable."
sleep 10

docker volume rm backup_data
docker volume rm pg_data

@@ -8,7 +8,7 @@ services:
- PGID=1000
- USER_NAME=test
volumes:
- ${KEY_DIR:-.}/id_rsa.pub:/config/.ssh/authorized_keys
- ./id_rsa.pub:/config/.ssh/authorized_keys
- ssh_backup_data:/tmp

backup:

@@ -30,7 +30,7 @@ services:
SSH_REMOTE_PATH: /tmp
SSH_IDENTITY_PASSPHRASE: test1234
volumes:
- ${KEY_DIR:-.}/id_rsa:/root/.ssh/id_rsa
- ./id_rsa:/root/.ssh/id_rsa
- app_data:/backup/app_data:ro
- /var/run/docker.sock:/var/run/docker.sock

@@ -6,11 +6,9 @@ cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

export KEY_DIR=$(mktemp -d)
ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"

ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f "$KEY_DIR/id_rsa" -C "docker-volume-backup@local"

docker compose up -d --quiet-pull
docker compose up -d
sleep 5

docker compose exec backup backup
@@ -28,6 +26,7 @@ pass "Found relevant files in decrypted and untared remote backups."

# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5

@@ -40,28 +39,5 @@ docker run --rm \

pass "Remote backups have not been deleted."

# The third part of this test checks if old backups get deleted when the retention
# is set to 7 days (which it should)

BACKUP_RETENTION_DAYS="7" docker compose up -d
sleep 5

info "Create first backup with no prune"
docker compose exec backup backup

# Set the modification date of the old backup to 14 days ago
docker run --rm \
-v ssh_backup_data:/ssh_data \
--user 1000 \
alpine \
ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /ssh_data/test-hostnametoken-old.tar.gz'

info "Create second backup and prune"
docker compose exec backup backup

docker run --rm \
-v ssh_backup_data:/ssh_data \
alpine \
ash -c 'test ! -f /ssh_data/test-hostnametoken-old.tar.gz && test -f /ssh_data/test-hostnametoken.tar.gz'

pass "Old remote backup has been pruned, new one is still present."
docker compose down --volumes
rm -f id_rsa id_rsa.pub

@@ -27,3 +27,11 @@ pass "Found relevant files in untared backup."

sleep 5
expect_running_containers "5"

docker stack rm test_stack
docker swarm leave --force

sleep 10

docker volume rm backup_data
docker volume rm pg_data
64 test/test.sh
@@ -2,69 +2,15 @@

set -e

MATCH_PATTERN=$1
IMAGE_TAG=${IMAGE_TAG:-canary}
TEST_VERSION=${1:-canary}

sandbox="docker_volume_backup_test_sandbox"
tarball="$(mktemp -d)/image.tar.gz"

trap finish EXIT INT TERM

finish () {
rm -rf $(dirname $tarball)
if [ ! -z $(docker ps -aq --filter=name=$sandbox) ]; then
docker rm -f $(docker stop $sandbox)
fi
if [ ! -z $(docker volume ls -q --filter=name="^${sandbox}\$") ]; then
docker volume rm $sandbox
fi
}

docker build -t offen/docker-volume-backup:test-sandbox .

if [ ! -z "$BUILD_IMAGE" ]; then
docker build -t offen/docker-volume-backup:$IMAGE_TAG $(dirname $(pwd))
fi

docker save offen/docker-volume-backup:$IMAGE_TAG -o $tarball

find_args="-mindepth 1 -maxdepth 1 -type d"
if [ ! -z "$MATCH_PATTERN" ]; then
find_args="$find_args -name $MATCH_PATTERN"
fi

for dir in $(find $find_args | sort); do
dir=$(echo $dir | cut -c 3-)
for dir in $(ls -d -- */); do
test="${dir}run.sh"
echo "################################################"
echo "Now running ${dir}"
echo "Now running $test"
echo "################################################"
echo ""

test="${dir}/run.sh"
docker_run_args="--name "$sandbox" --detach \
--privileged \
-v $(dirname $(pwd)):/code \
-v $tarball:/cache/image.tar.gz \
-v $sandbox:/var/lib/docker"

if [ -z "$NO_IMAGE_CACHE" ]; then
docker_run_args="$docker_run_args \
-v "${sandbox}_image":/var/lib/docker/image \
-v "${sandbox}_overlay2":/var/lib/docker/overlay2"
fi

docker run $docker_run_args offen/docker-volume-backup:test-sandbox

until docker exec $sandbox /bin/sh -c 'docker info' > /dev/null 2>&1; do
sleep 0.5
done
sleep 0.5

docker exec $sandbox /bin/sh -c "docker load -i /cache/image.tar.gz"
docker exec -e TEST_VERSION=$IMAGE_TAG $sandbox /bin/sh -c "/code/test/$test"

docker rm $(docker stop $sandbox)
docker volume rm $sandbox
TEST_VERSION=$TEST_VERSION /bin/sh $test
echo ""
echo "$test passed"
echo ""
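
On the v2.33.0 side, test.sh builds a test-sandbox image and runs each test inside a privileged Docker-in-Docker container, loading the image under test from a saved tarball. A sketch of how that runner is invoked, assuming it is called from the test directory as the CI workflow does; all variables are the ones read by the script above:

    # run the whole suite against the canary image
    ./test.sh

    # build the image first, pick a tag, and run a single test directory
    BUILD_IMAGE=1 IMAGE_TAG=v2.33.0 ./test.sh gpg

    # opt out of the per-sandbox image/overlay2 cache volumes
    NO_IMAGE_CACHE=1 ./test.sh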

2 test/user/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
local
backup

@@ -10,6 +10,7 @@ services:
- docker-volume-backup.archive-pre.user=testuser
- docker-volume-backup.archive-pre=/bin/sh -c 'whoami > /tmp/whoami.txt'

backup:
image: offen/docker-volume-backup:${TEST_VERSION:-canary}
deploy:

@@ -20,7 +21,7 @@ services:
BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
EXEC_FORWARD_OUTPUT: "true"
volumes:
- ${LOCAL_DIR:-./local}:/archive
- ./local:/archive
- app_data:/backup/data:ro
- /var/run/docker.sock:/var/run/docker.sock

18 test/user/run.sh Executable file → Normal file
@@ -6,25 +6,25 @@ cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

export LOCAL_DIR=$(mktemp -d)
export TMP_DIR=$(mktemp -d)
docker compose up -d

echo "LOCAL_DIR $LOCAL_DIR"
echo "TMP_DIR $TMP_DIR"

docker compose up -d --quiet-pull
user_name=testuser
docker exec user-alpine-1 adduser --disabled-password "$user_name"

docker compose exec backup backup

tar -xvf "$LOCAL_DIR/test.tar.gz" -C "$TMP_DIR"
if [ ! -f "$TMP_DIR/backup/data/whoami.txt" ]; then
tar -xvf ./local/test.tar.gz
if [ ! -f ./backup/data/whoami.txt ]; then
fail "Could not find file written by pre command."
fi
pass "Found expected file."

if [ "$(cat $TMP_DIR/backup/data/whoami.txt)" != "$user_name" ]; then
tar -xvf ./local/test.tar.gz
if [ "$(cat ./backup/data/whoami.txt)" != "$user_name" ]; then
fail "Could not find expected user name."
fi
pass "Found expected user."

docker compose down --volumes
sudo rm -rf ./local

25 test/util.sh
@@ -15,34 +15,9 @@ fail () {
exit 1
}

skip () {
echo "[test:${current_test:-none}:skip] "$1""
exit 0
}

expect_running_containers () {
if [ "$(docker ps -q | wc -l)" != "$1" ]; then
fail "Expected $1 containers to be running, instead seen: "$(docker ps -a | wc -l)""
fi
pass "$1 containers running."
}

docker() {
case $1 in
compose)
shift
case $1 in
up)
shift
command docker compose up --timeout 3 "$@";;
down)
shift
command docker compose down --timeout 3 "$@";;
*)
command docker compose "$@";;
esac
;;
*)
command docker "$@";;
esac
}
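
The helpers shown in this util.sh hunk on the v2.33.0 side are what the test scripts in this compare build on. A short sketch of how a single test consumes them, composed only of calls that appear elsewhere in this diff:

    #!/bin/sh
    set -e
    cd "$(dirname "$0")"
    . ../util.sh                      # fail/pass/skip and expect_running_containers
    current_test=$(basename $(pwd))   # used in the [test:<name>] log prefix

    docker compose up -d              # the docker() wrapper adds --timeout 3 to compose up/down
    expect_running_containers "2"     # fails the test when the running count differs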

@@ -6,7 +6,7 @@ cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

docker compose up -d --quiet-pull
docker compose up -d
sleep 5

docker compose exec backup backup

@@ -24,6 +24,7 @@ pass "Found relevant files in untared remote backup."

# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5

@@ -36,28 +37,4 @@ docker run --rm \

pass "Remote backups have not been deleted."

# The third part of this test checks if old backups get deleted when the retention
# is set to 7 days (which it should)

BACKUP_RETENTION_DAYS="7" docker compose up -d
sleep 5

info "Create first backup with no prune"
docker compose exec backup backup

# Set the modification date of the old backup to 14 days ago
docker run --rm \
-v webdav_backup_data:/webdav_data \
--user 82 \
alpine \
ash -c 'touch -d@$(( $(date +%s) - 1209600 )) /webdav_data/data/my/new/path/test-hostnametoken-old.tar.gz'

info "Create second backup and prune"
docker compose exec backup backup

docker run --rm \
-v webdav_backup_data:/webdav_data \
alpine \
ash -c 'test ! -f /webdav_data/data/my/new/path/test-hostnametoken-old.tar.gz && test -f /webdav_data/data/my/new/path/test-hostnametoken.tar.gz'

pass "Old remote backup has been pruned, new one is still present."
docker compose down --volumes

@@ -1,41 +0,0 @@
#!/bin/sh

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

docker network create test_network
docker volume create app_data

LOCAL_DIR=$(mktemp -d)

docker run -d -q \
--name offen \
--network test_network \
-v app_data:/var/opt/offen/ \
offen/offen:latest

sleep 10

docker run --rm -q \
--network test_network \
-v app_data:/backup/app_data \
-v $LOCAL_DIR:/archive \
-v /var/run/docker.sock:/var/run/docker.sock \
--env BACKUP_COMPRESSION=zst \
--env BACKUP_FILENAME='test.{{ .Extension }}' \
--entrypoint backup \
offen/docker-volume-backup:${TEST_VERSION:-canary}

tmp_dir=$(mktemp -d)
tar -xvf "$LOCAL_DIR/test.tar.zst" --zstd -C $tmp_dir
if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
fail "Could not find expected file in untared archive."
fi
pass "Found relevant files in untared local backup."

# This test does not stop containers during backup. This is happening on
# purpose in order to cover this setup as well.
expect_running_containers "1"