Compare commits

..

9 Commits

Author         SHA1        Message                                               Date
Frederik Ring  7ec6f154ec  Allow use of managed identity credential              2022-12-25 09:58:01 +01:00
Frederik Ring  49a10094cc  Tidy go.mod file                                      2022-12-24 10:41:11 +01:00
Frederik Ring  676cfbe25f  Add azure to notifications doc                        2022-12-24 10:37:47 +01:00
Frederik Ring  1fa0548756  Add support for remote path                           2022-12-24 10:30:12 +01:00
Frederik Ring  c0eff2e14f  Add documentation for Azure Blob Storage              2022-12-24 09:38:23 +01:00
Frederik Ring  fdce7ee454  Implement pruning for Azure blob storage              2022-12-24 09:28:28 +01:00
Frederik Ring  a253fdfbec  Set up automated testing for Azure Storage            2022-12-24 08:48:09 +01:00
Frederik Ring  7aa2166aee  Implement copy for Azure Blob Storage                 2022-12-23 10:14:37 +01:00
Frederik Ring  e702b2b682  Scaffold Azure storage backend that does nothing yet  2022-12-23 09:21:26 +01:00
46 changed files with 478 additions and 1458 deletions

.circleci/config.yml (new file, 75 changed lines)

@@ -0,0 +1,75 @@
version: 2.1

jobs:
  canary:
    machine:
      image: ubuntu-2004:202201-02
    working_directory: ~/docker-volume-backup
    resource_class: large
    steps:
      - checkout
      - run:
          name: Build
          command: |
            docker build . -t offen/docker-volume-backup:canary
      - run:
          name: Install gnupg
          command: |
            sudo apt-get install -y gnupg
      - run:
          name: Run tests
          working_directory: ~/docker-volume-backup/test
          command: |
            export GPG_TTY=$(tty)
            ./test.sh canary

  build:
    docker:
      - image: cimg/base:2020.06
        environment:
          DOCKER_BUILDKIT: '1'
          DOCKER_CLI_EXPERIMENTAL: enabled
    working_directory: ~/docker-volume-backup
    resource_class: large
    steps:
      - checkout
      - setup_remote_docker:
          version: 20.10.6
      - docker/install-docker-credential-helper:
          release-tag: v0.6.4
      - docker/configure-docker-credentials-store
      - run:
          name: Push to Docker Hub
          command: |
            echo "$DOCKER_ACCESSTOKEN" | docker login --username offen --password-stdin
            # This is required for building ARM: https://gitlab.alpinelinux.org/alpine/aports/-/issues/12406
            docker run --rm --privileged linuxkit/binfmt:v0.8
            docker context create docker-volume-backup
            docker buildx create docker-volume-backup --name docker-volume-backup --use
            docker buildx inspect --bootstrap
            tag_args="-t offen/docker-volume-backup:$CIRCLE_TAG"
            if [[ "$CIRCLE_TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
              # prerelease tags like `v2.0.0-alpha.1` should not be released as `latest`
              tag_args="$tag_args -t offen/docker-volume-backup:latest"
              tag_args="$tag_args -t offen/docker-volume-backup:$(echo "$CIRCLE_TAG" | cut -d. -f1)"
            fi
            docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
              $tag_args . --push

workflows:
  version: 2
  docker_image:
    jobs:
      - canary:
          filters:
            tags:
              ignore: /^v.*/
      - build:
          filters:
            branches:
              ignore: /.*/
            tags:
              only: /^v.*/

orbs:
  docker: circleci/docker@2.1.4
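
The release step in the config above only promotes a tag to `latest` (and to its major-version alias) when the tag matches a strict `vMAJOR.MINOR.PATCH` pattern. As an illustrative aside (not part of the diff), the same gate expressed in Go:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Same pattern the CI config uses to decide whether a git tag is a
    // stable release that should also be published as `latest`.
    stable := regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)

    for _, tag := range []string{"v2.0.0", "v2.0.0-alpha.1"} {
        fmt.Printf("%s -> stable release: %v\n", tag, stable.MatchString(tag))
    }
    // v2.0.0 -> stable release: true
    // v2.0.0-alpha.1 -> stable release: false
}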


@@ -8,9 +8,7 @@ assignees: ''
 ---
 **Describe the bug**
-<!--
 A clear and concise description of what the bug is.
--->

 **To Reproduce**
 Steps to reproduce the behavior:
@@ -19,16 +17,12 @@ Steps to reproduce the behavior:
 3. ...

 **Expected behavior**
-<!--
 A clear and concise description of what you expected to happen.
--->

-**Version (please complete the following information):**
-- Image Version: <!-- e.g. v2.21.0 -->
-- Docker Version: <!-- e.g. 20.10.17 -->
-- Docker Compose Version (if applicable): <!-- e.g. 1.29.2 -->
+**Desktop (please complete the following information):**
+- Image Version: [e.g. v2.21.0]
+- Docker Version: [e.g. 20.10.17]
+- Docker Compose Version (if applicable): [e.g. 1.29.2]

 **Additional context**
-<!--
 Add any other context about the problem here.
--->


@@ -8,21 +8,13 @@ assignees: ''
 ---
 **Is your feature request related to a problem? Please describe.**
-<!--
 A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
--->

 **Describe the solution you'd like**
-<!--
 A clear and concise description of what you want to happen.
--->

 **Describe alternatives you've considered**
-<!--
 A clear and concise description of any alternative solutions or features you've considered.
--->

 **Additional context**
-<!--
 Add any other context or screenshots about the feature request here.
--->


@@ -8,21 +8,13 @@ assignees: ''
 ---
 **What are you trying to do?**
-<!--
 A clear and concise description of what you are trying to do, but cannot get working.
--->

 **What is your current configuration?**
-<!--
 Add the full configuration you are using. Please redact out any real-world credentials.
--->

 **Log output**
-<!--
 Provide the full log output of your setup.
--->

 **Additional context**
-<!--
 Add any other context or screenshots about the support request here.
--->


@@ -1,10 +0,0 @@
version: 2
updates:
  - package-ecosystem: docker
    directory: /
    schedule:
      interval: weekly
  - package-ecosystem: gomod
    directory: /
    schedule:
      interval: weekly


@@ -1,59 +0,0 @@
name: Release Docker Image
on:
  push:
    tags: v**

jobs:
  push_to_registries:
    name: Push Docker image to multiple registries
    runs-on: ubuntu-latest
    permissions:
      packages: write
      contents: read
    steps:
      - name: Check out the repo
        uses: actions/checkout@v3
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Log in to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      - name: Log in to GHCR
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract Docker tags
        id: meta
        run: |
          version_tag="${{github.ref_name}}"
          tags=($version_tag)
          if [[ "$version_tag" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            # prerelease tags like `v2.0.0-alpha.1` should not be released as `latest` nor `v2`
            tags+=("latest")
            tags+=($(echo "$version_tag" | cut -d. -f1))
          fi
          releases=""
          for tag in "${tags[@]}"; do
            releases="${releases:+$releases,}offen/docker-volume-backup:$tag,ghcr.io/offen/docker-volume-backup:$tag"
          done
          echo "releases=$releases" >> "$GITHUB_OUTPUT"
      - name: Build and push Docker images
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          tags: ${{ steps.meta.outputs.releases }}


@@ -1,30 +0,0 @@
name: Run Integration Tests
on:
  push:
    branches:
      - main
  pull_request:

jobs:
  test:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Build Docker Image
        env:
          DOCKER_BUILDKIT: '1'
        run: docker build . -t offen/docker-volume-backup:test
      - name: Run Tests
        working-directory: ./test
        run: |
          # Stop the buildx container so the tests can make assertions
          # about the number of running containers
          docker rm -f $(docker ps -aq)
          export GPG_TTY=$(tty)
          ./test.sh test


@@ -1,7 +1,7 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0

-FROM golang:1.20-alpine as builder
+FROM golang:1.19-alpine as builder
 WORKDIR /app
 COPY . .
@@ -9,13 +9,15 @@ RUN go mod download
 WORKDIR /app/cmd/backup
 RUN go build -o backup .

-FROM alpine:3.18
+FROM alpine:3.16

 WORKDIR /root

-RUN apk add --no-cache ca-certificates busybox-extras
+RUN apk add --no-cache ca-certificates
 COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
-COPY --chmod=755 ./entrypoint.sh /root/
+COPY ./entrypoint.sh /root/
+RUN chmod +x entrypoint.sh

 ENTRYPOINT ["/root/entrypoint.sh"]

README.md (112 changed lines)

@@ -4,7 +4,7 @@

 # docker-volume-backup

-Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage or SSH compatible storage.
+Backup Docker volumes locally or to any S3 compatible storage.

 The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) sidecar container to an existing Docker setup.
 It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV, Azure Blob Storage or SSH compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
@@ -14,7 +14,6 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
 - [Quickstart](#quickstart)
   - [Recurring backups in a compose setup](#recurring-backups-in-a-compose-setup)
   - [One-off backups using Docker CLI](#one-off-backups-using-docker-cli)
-  - [Available image registries](#available-image-registries)
 - [Configuration reference](#configuration-reference)
 - [How to](#how-to)
   - [Stop containers during backup](#stop-containers-during-backup)
@@ -31,11 +30,9 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
   - [Replace deprecated `BACKUP_FROM_SNAPSHOT` usage](#replace-deprecated-backup_from_snapshot-usage)
   - [Replace deprecated `exec-pre` and `exec-post` labels](#replace-deprecated-exec-pre-and-exec-post-labels)
   - [Using a custom Docker host](#using-a-custom-docker-host)
-  - [Use with rootless Docker](#use-with-rootless-docker)
   - [Run multiple backup schedules in the same container](#run-multiple-backup-schedules-in-the-same-container)
   - [Define different retention schedules](#define-different-retention-schedules)
   - [Use special characters in notification URLs](#use-special-characters-in-notification-urls)
-  - [Handle file uploads using third party tools](#handle-file-uploads-using-third-party-tools)
 - [Recipes](#recipes)
   - [Backing up to AWS S3](#backing-up-to-aws-s3)
   - [Backing up to Filebase](#backing-up-to-filebase)
@@ -122,18 +119,6 @@ docker run --rm \

 Alternatively, pass a `--env-file` in order to use a full config as described below.

-### Available image registries
-
-This Docker image is published to both Docker Hub and the GitHub container registry.
-Depending on your preferences and needs, you can reference both `offen/docker-volume-backup` as well as `ghcr.io/offen/docker-volume-backup`:
-
-```
-docker pull offen/docker-volume-backup:v2
-docker pull ghcr.io/offen/docker-volume-backup:v2
-```
-
-Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
-
 ## Configuration reference

 Backup targets, schedule and retention are configured in environment variables.
@@ -260,15 +245,6 @@ You can populate below template according to your requirements and use it as you
 # AWS_STORAGE_CLASS="GLACIER"

-# Setting this variable will change the S3 default part size for the copy step.
-# This value is useful when you want to upload large files.
-# NB : While using Scaleway as S3 provider, be aware that the parts counter is set to 1.000.
-# While Minio uses a hard coded value to 10.000. As a workaround, try to set a higher value.
-# Defaults to "16" (MB) if unset (from minio), you can set this value according to your needs.
-# The unit is in MB and an integer.
-# AWS_PART_SIZE=16

 # You can also backup files to any WebDAV server:

 # The URL of the remote WebDAV server
@@ -328,8 +304,7 @@ You can populate below template according to your requirements and use it as you
 # SSH_IDENTITY_PASSPHRASE="pass"

-# The credential's account name when using Azure Blob Storage. This has to be
-# set when using Azure Blob Storage.
+# The credential's account name when using Azure Blob Storage.

 # AZURE_STORAGE_ACCOUNT_NAME="account-name"
@@ -343,7 +318,7 @@ You can populate below template according to your requirements and use it as you
 # AZURE_STORAGE_CONTAINER_NAME="container-name"

 # The service endpoint when using Azure Blob Storage. This is a template that
-# can be passed the account name as shown in the default value below.
+# will be passed the account name as shown in the default value below.

 # AZURE_STORAGE_ENDPOINT="https://{{ .AccountName }}.blob.core.windows.net/"
@@ -433,7 +408,7 @@ You can populate below template according to your requirements and use it as you
 # Notifications (email, Slack, etc.) can be sent out when a backup run finishes.
 # Configuration is provided as a comma-separated list of URLs as consumed
-# by `shoutrrr`: https://containrrr.dev/shoutrrr/0.7/services/overview/
+# by `shoutrrr`: https://containrrr.dev/shoutrrr/v0.5/services/overview/
 # The content of such notifications can be customized. Dedicated documentation
 # on how to do this can be found in the README. When providing multiple URLs or
 # an URL that contains a comma, the values can be URL encoded to avoid ambiguities.
@@ -577,7 +552,7 @@ services:
 Notification backends other than email are also supported.
 Refer to the documentation of [shoutrrr][shoutrrr-docs] to find out about options and configuration.

-[shoutrrr-docs]: https://containrrr.dev/shoutrrr/0.7/services/overview/
+[shoutrrr-docs]: https://containrrr.dev/shoutrrr/v0.5/services/overview/

 ### Customize notifications
@@ -666,24 +641,6 @@ volumes:
 The backup procedure is guaranteed to wait for all `pre` or `post` commands to finish before proceeding.
 However there are no guarantees about the order in which they are run, which could also happen concurrently.

-By default the backup command is executed by the user provided by the container's image.
-It is possible to specify a custom user that is used to run commands in dedicated labels with the format `docker-volume-backup.[step]-[pre|post].user`:
-
-```yml
-version: '3'
-
-services:
-  gitea:
-    image: gitea/gitea
-    volumes:
-      - backup_data:/tmp
-    labels:
-      - docker-volume-backup.archive-pre.user=git
-      - docker-volume-backup.archive-pre=/bin/bash -c 'cd /tmp; /usr/local/bin/gitea dump -c /data/gitea/conf/app.ini -R -f dump.zip'
-```
-
-Make sure the user exists and is present in `passwd` inside the target container.
-
 ### Encrypting your backup using GPG

 The image supports encrypting backups using GPG out of the box.
@@ -823,7 +780,7 @@ services:
       - docker-volume-backup.archive-post=rm -rf /tmp/backup/my-app
   backup:
-    image: offen/docker-volume-backup:v2
+    image: offen/docker-volume-backup:latest
     environment:
       BACKUP_SOURCES: /tmp/backup
     volumes:
@@ -861,23 +818,6 @@ DOCKER_HOST=tcp://docker_socket_proxy:2375
 In case you are using a socket proxy, it must support `GET` and `POST` requests to the `/containers` endpoint. If you are using Docker Swarm, it must also support the `/services` endpoint. If you are using pre/post backup commands, it must also support the `/exec` endpoint.

-### Use with rootless Docker
-
-It's also possible to use this image with a [rootless Docker installation][rootless-docker].
-Instead of mounting `/var/run/docker.sock`, mount the user-specific socket into the container:
-
-```yml
-services:
-  backup:
-    image: offen/docker-volume-backup:v2
-    # ... configuration omitted
-    volumes:
-      - backup:/backup:ro
-      - /run/user/1000/docker.sock:/var/run/docker.sock:ro
-```
-
-[rootless-docker]: https://docs.docker.com/engine/security/rootless/
-
 ### Run multiple backup schedules in the same container

 Multiple backup schedules with different configuration can be configured by mounting an arbitrary number of configuration files (using the `.env` format) into `/etc/dockervolumebackup/conf.d`:
@@ -928,7 +868,7 @@ BACKUP_SOURCES=/backup/app2_data
 If you want to manage backup retention on different schedules, the most straight forward approach is to define a dedicated configuration for retention rule using a different prefix in the `BACKUP_FILENAME` parameter and then run them on different cron schedules.

-For example, if you wanted to keep daily backups for 7 days, weekly backups for a month, and retain monthly backups forever, you could create three configuration files and mount them into `/etc/dockervolumebackup/conf.d`:
+For example, if you wanted to keep daily backups for 7 days, weekly backups for a month, and retain monthly backups forever, you could create three configuration files and mount them into `/etc/dockervolumebackup.d`:

 ```ini
 # 01daily.conf
@@ -973,44 +913,6 @@ where service is any of the [supported services][shoutrrr-docs], e.g. for SMTP:
 docker run --rm -ti containrrr/shoutrrr generate smtp
 ```

-### Handle file uploads using third party tools
-
-If you want to use a non-supported storage backend, or want to use a third party (e.g. rsync, rclone) tool for file uploads, you can build a Docker image containing the required binaries off this one, and call through to these in lifecycle hooks.
-
-For example, if you wanted to use `rsync`, define your Docker image like this:
-
-```Dockerfile
-FROM offen/docker-volume-backup:v2
-RUN apk add rsync
-```
-
-Using this image, you can now omit configuring any of the supported storage backends, and instead define your own mechanism in a `docker-volume-backup.copy-post` label:
-
-```yml
-version: '3'
-
-services:
-  backup:
-    image: your-custom-image
-    restart: always
-    environment:
-      BACKUP_FILENAME: "daily-backup-%Y-%m-%dT%H-%M-%S.tar.gz"
-      BACKUP_CRON_EXPRESSION: "0 2 * * *"
-    labels:
-      - docker-volume-backup.copy-post=/bin/sh -c 'rsync $$COMMAND_RUNTIME_ARCHIVE_FILEPATH /destination'
-    volumes:
-      - app_data:/backup/app_data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
-  # other services defined here ...
-
-volumes:
-  app_data:
-```
-
-Commands will be invoked with the filepath of the tar archive passed as `COMMAND_RUNTIME_BACKUP_FILEPATH`.
-
 ## Recipes

 This section lists configuration for some real-world use cases that you can mix and match according to your needs.


@@ -28,7 +28,6 @@ type Config struct {
 	AwsSecretAccessKey     string `split_words:"true"`
 	AwsSecretAccessKeyFile string `split_words:"true"`
 	AwsIamRoleEndpoint     string `split_words:"true"`
-	AwsPartSize            int64  `split_words:"true"`
 	BackupSources          string `split_words:"true" default:"/backup"`
 	BackupFilename         string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
 	BackupFilenameExpand   bool   `split_words:"true"`


@@ -21,17 +21,12 @@ import (
 	"golang.org/x/sync/errgroup"
 )

-func (s *script) exec(containerRef string, command string, user string) ([]byte, []byte, error) {
+func (s *script) exec(containerRef string, command string) ([]byte, []byte, error) {
 	args, _ := argv.Argv(command, nil, nil)
-	commandEnv := []string{
-		fmt.Sprintf("COMMAND_RUNTIME_ARCHIVE_FILEPATH=%s", s.file),
-	}
 	execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, types.ExecConfig{
 		Cmd:          args[0],
 		AttachStdin:  true,
 		AttachStderr: true,
-		Env:          commandEnv,
-		User:         user,
 	})
 	if err != nil {
 		return nil, nil, fmt.Errorf("exec: error creating container exec: %w", err)
@@ -91,6 +86,7 @@ func (s *script) runLabeledCommands(label string) error {
 		})
 	}
 	containersWithCommand, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+		Quiet:   true,
 		Filters: filters.NewArgs(f...),
 	})
 	if err != nil {
@@ -104,6 +100,7 @@ func (s *script) runLabeledCommands(label string) error {
 			Value: "docker-volume-backup.exec-pre",
 		}
 		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+			Quiet:   true,
 			Filters: filters.NewArgs(f...),
 		})
 		if err != nil {
@@ -121,6 +118,7 @@ func (s *script) runLabeledCommands(label string) error {
 			Value: "docker-volume-backup.exec-post",
 		}
 		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+			Quiet:   true,
 			Filters: filters.NewArgs(f...),
 		})
 		if err != nil {
@@ -157,11 +155,8 @@ func (s *script) runLabeledCommands(label string) error {
 			cmd, _ = c.Labels["docker-volume-backup.exec-post"]
 		}

-		userLabelName := fmt.Sprintf("%s.user", label)
-		user := c.Labels[userLabelName]
 		s.logger.Infof("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/"))
-		stdout, stderr, err := s.exec(c.ID, cmd, user)
+		stdout, stderr, err := s.exec(c.ID, cmd)
 		if s.c.ExecForwardOutput {
 			os.Stderr.Write(stderr)
 			os.Stdout.Write(stdout)


@@ -4,9 +4,10 @@

 package main

 import (
-	"errors"
 	"fmt"
 	"sort"
+
+	"github.com/offen/docker-volume-backup/internal/utilities"
 )

 // hook contains a queued action that can be trigger them when the script
@@ -51,7 +52,7 @@ func (s *script) runHooks(err error) error {
 		}
 	}
 	if len(actionErrors) != 0 {
-		return errors.Join(actionErrors...)
+		return utilities.Join(actionErrors...)
 	}
 	return nil
 }


@@ -6,13 +6,13 @@ package main

 import (
 	"bytes"
 	_ "embed"
-	"errors"
 	"fmt"
 	"os"
 	"text/template"
 	"time"

 	sTypes "github.com/containrrr/shoutrrr/pkg/types"
+	"github.com/offen/docker-volume-backup/internal/utilities"
 )

 //go:embed notifications.tmpl
@@ -69,7 +69,7 @@ func (s *script) sendNotification(title, body string) error {
 		}
 	}
 	if len(errs) != 0 {
-		return fmt.Errorf("sendNotification: error sending message: %w", errors.Join(errs...))
+		return fmt.Errorf("sendNotification: error sending message: %w", utilities.Join(errs...))
 	}
 	return nil
 }


@@ -5,7 +5,6 @@ package main

 import (
 	"context"
-	"errors"
 	"fmt"
 	"io"
 	"io/fs"
@@ -21,11 +20,11 @@ import (
 	"github.com/offen/docker-volume-backup/internal/storage/s3"
 	"github.com/offen/docker-volume-backup/internal/storage/ssh"
 	"github.com/offen/docker-volume-backup/internal/storage/webdav"
+	"github.com/offen/docker-volume-backup/internal/utilities"

 	"github.com/containrrr/shoutrrr"
 	"github.com/containrrr/shoutrrr/pkg/router"
 	"github.com/docker/docker/api/types"
-	ctr "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/client"
@@ -111,7 +110,7 @@ func newScript() (*script, error) {
 		s.cli = cli
 	}

-	logFunc := func(logType storage.LogLevel, context string, msg string, params ...any) {
+	logFunc := func(logType storage.LogLevel, context string, msg string, params ...interface{}) {
 		switch logType {
 		case storage.LogLevelWarning:
 			s.logger.Warnf("["+context+"] "+msg, params...)
@@ -143,7 +142,6 @@ func newScript() (*script, error) {
 			BucketName:   s.c.AwsS3BucketName,
 			StorageClass: s.c.AwsStorageClass,
 			CACert:       s.c.AwsEndpointCACert.Cert,
-			PartSize:     s.c.AwsPartSize,
 		}
 		if s3Backend, err := s3.NewStorageBackend(s3Config, logFunc); err != nil {
 			return nil, err
@@ -282,7 +280,9 @@ func (s *script) stopContainers() (func() error, error) {
 		return noop, nil
 	}

-	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
+	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+		Quiet: true,
+	})
 	if err != nil {
 		return noop, fmt.Errorf("stopContainers: error querying for containers: %w", err)
 	}
@@ -292,6 +292,7 @@ func (s *script) stopContainers() (func() error, error) {
 		s.c.BackupStopContainerLabel,
 	)
 	containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+		Quiet: true,
 		Filters: filters.NewArgs(filters.KeyValuePair{
 			Key:   "label",
 			Value: containerLabel,
@@ -316,7 +317,7 @@ func (s *script) stopContainers() (func() error, error) {
 	var stoppedContainers []types.Container
 	var stopErrors []error
 	for _, container := range containersToStop {
-		if err := s.cli.ContainerStop(context.Background(), container.ID, ctr.StopOptions{}); err != nil {
+		if err := s.cli.ContainerStop(context.Background(), container.ID, nil); err != nil {
 			stopErrors = append(stopErrors, err)
 		} else {
 			stoppedContainers = append(stoppedContainers, container)
@@ -328,7 +329,7 @@ func (s *script) stopContainers() (func() error, error) {
 		stopError = fmt.Errorf(
 			"stopContainers: %d error(s) stopping containers: %w",
 			len(stopErrors),
-			errors.Join(stopErrors...),
+			utilities.Join(stopErrors...),
 		)
 	}
@@ -365,7 +366,7 @@ func (s *script) stopContainers() (func() error, error) {
 			if serviceMatch.ID == "" {
 				return fmt.Errorf("stopContainers: couldn't find service with name %s", serviceName)
 			}
-			serviceMatch.Spec.TaskTemplate.ForceUpdate += 1
+			serviceMatch.Spec.TaskTemplate.ForceUpdate = 1
 			if _, err := s.cli.ServiceUpdate(
 				context.Background(), serviceMatch.ID,
 				serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
@@ -379,7 +380,7 @@ func (s *script) stopContainers() (func() error, error) {
 		return fmt.Errorf(
 			"stopContainers: %d error(s) restarting containers and services: %w",
 			len(restartErrors),
-			errors.Join(restartErrors...),
+			utilities.Join(restartErrors...),
 		)
 	}
 	s.logger.Infof(


@@ -22,12 +22,5 @@ else
 	done
 fi

-if [ ! -z "$SERVE_METRICS_PATH" ]; then
-  mkdir -p /var/www/html${SERVE_METRICS_PATH}
-  echo "ok" > /var/www/html${SERVE_METRICS_PATH}/metrics.txt
-  httpd -h /var/www/html -p "${SERVE_METRICS_PORT:-80}"
-  echo "Serving metrics on port ${SERVE_METRICS_PORT:-80}."
-fi
-
 echo "Starting cron in foreground."
 crond -f -d 8

go.mod (69 changed lines)

@@ -3,59 +3,72 @@ module github.com/offen/docker-volume-backup

 go 1.19

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0
-	github.com/containrrr/shoutrrr v0.7.1
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1
+	github.com/containrrr/shoutrrr v0.5.2
 	github.com/cosiner/argv v0.1.0
-	github.com/docker/docker v24.0.5+incompatible
+	github.com/docker/docker v20.10.11+incompatible
 	github.com/gofrs/flock v0.8.1
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
-	github.com/minio/minio-go/v7 v7.0.61
-	github.com/otiai10/copy v1.11.0
+	github.com/minio/minio-go/v7 v7.0.44
+	github.com/otiai10/copy v1.7.0
 	github.com/pkg/sftp v1.13.5
-	github.com/sirupsen/logrus v1.9.3
-	github.com/studio-b12/gowebdav v0.9.0
-	golang.org/x/crypto v0.11.0
-	golang.org/x/sync v0.3.0
+	github.com/sirupsen/logrus v1.9.0
+	github.com/studio-b12/gowebdav v0.0.0-20220128162035-c7b1ff8a5e62
+	golang.org/x/crypto v0.3.0
+	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
 )

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/docker/distribution v2.8.2+incompatible // indirect
+	github.com/containerd/containerd v1.6.6 // indirect
+	github.com/docker/distribution v2.7.1+incompatible // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
-	github.com/dustin/go-humanize v1.0.1 // indirect
-	github.com/fatih/color v1.13.0 // indirect
+	github.com/dustin/go-humanize v1.0.0 // indirect
+	github.com/fatih/color v1.10.0 // indirect
+	github.com/fsnotify/fsnotify v1.4.9 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
+	github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/uuid v1.3.0 // indirect
+	github.com/gorilla/mux v1.7.3 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.16.7 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
+	github.com/klauspost/compress v1.15.12 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.1 // indirect
 	github.com/kr/fs v0.1.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
-	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.16 // indirect
+	github.com/mattn/go-colorable v0.1.8 // indirect
+	github.com/mattn/go-isatty v0.0.12 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/sha256-simd v1.0.1 // indirect
+	github.com/minio/sha256-simd v1.0.0 // indirect
 	github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
+	github.com/nxadm/tail v1.4.6 // indirect
+	github.com/onsi/ginkgo v1.14.2 // indirect
+	github.com/onsi/gomega v1.10.3 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
-	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
+	github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/rs/xid v1.5.0 // indirect
-	golang.org/x/net v0.12.0 // indirect
-	golang.org/x/sys v0.10.0 // indirect
-	golang.org/x/text v0.11.0 // indirect
+	github.com/rs/xid v1.4.0 // indirect
+	golang.org/x/net v0.2.0 // indirect
+	golang.org/x/sys v0.2.0 // indirect
+	golang.org/x/text v0.4.0 // indirect
+	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 // indirect
+	google.golang.org/grpc v1.47.0 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
 	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
-	gotest.tools/v3 v3.0.3 // indirect
+	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
 )

go.sum (1149 changed lines): file diff suppressed because it is too large


@@ -6,11 +6,9 @@ package azure

 import (
 	"bytes"
 	"context"
-	"errors"
 	"fmt"
 	"os"
 	"path/filepath"
-	"strings"
 	"sync"
 	"text/template"
 	"time"
@@ -19,6 +17,7 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
 	"github.com/offen/docker-volume-backup/internal/storage"
+	"github.com/offen/docker-volume-backup/internal/utilities"
 )

 type azureBlobStorage struct {
@@ -46,7 +45,6 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 	if err := endpointTemplate.Execute(&ep, opts); err != nil {
 		return nil, fmt.Errorf("NewStorageBackend: error executing endpoint template: %w", err)
 	}
-	normalizedEndpoint := fmt.Sprintf("%s/", strings.TrimSuffix(ep.String(), "/"))

 	var client *azblob.Client
 	if opts.PrimaryAccountKey != "" {
@@ -55,7 +53,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 			return nil, fmt.Errorf("NewStorageBackend: error creating shared key Azure credential: %w", err)
 		}
-		client, err = azblob.NewClientWithSharedKeyCredential(normalizedEndpoint, cred, nil)
+		client, err = azblob.NewClientWithSharedKeyCredential(ep.String(), cred, nil)
 		if err != nil {
 			return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
 		}
@@ -64,7 +62,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 		if err != nil {
 			return nil, fmt.Errorf("NewStorageBackend: error creating managed identity credential: %w", err)
 		}
-		client, err = azblob.NewClient(normalizedEndpoint, cred, nil)
+		client, err = azblob.NewClient(ep.String(), cred, nil)
 		if err != nil {
 			return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
 		}
@@ -135,21 +133,21 @@ func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*sto
 	if err := b.DoPrune(b.Name(), len(matches), int(totalCount), "Azure Blob Storage backup(s)", func() error {
 		wg := sync.WaitGroup{}
 		wg.Add(len(matches))
-		var errs []error
+		var errors []error
 		for _, match := range matches {
 			name := match
 			go func() {
 				_, err := b.client.DeleteBlob(context.Background(), b.containerName, name, nil)
 				if err != nil {
-					errs = append(errs, err)
+					errors = append(errors, err)
 				}
 				wg.Done()
 			}()
 		}
 		wg.Wait()
-		if len(errs) != 0 {
-			return errors.Join(errs...)
+		if len(errors) != 0 {
+			return utilities.Join(errors...)
 		}
 		return nil
 	}); err != nil {
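
For context on the endpoint handling above: `AZURE_STORAGE_ENDPOINT` is a Go `text/template` that is rendered with the account name, and the left-hand side additionally normalizes the result to end in exactly one trailing slash. A minimal standalone sketch of that behavior (illustrative only; the `opts` struct here is a stand-in for the backend's real config):

package main

import (
    "bytes"
    "fmt"
    "strings"
    "text/template"
)

// Stand-in for the backend config; field names follow the
// AZURE_STORAGE_* variables shown in the README diff above.
type opts struct {
    AccountName string
    Endpoint    string
}

func main() {
    o := opts{
        AccountName: "account-name",
        Endpoint:    "https://{{ .AccountName }}.blob.core.windows.net", // no trailing slash
    }

    var ep bytes.Buffer
    tmpl := template.Must(template.New("endpoint").Parse(o.Endpoint))
    if err := tmpl.Execute(&ep, o); err != nil {
        panic(err)
    }

    // The normalization dropped on the right-hand side: trim any trailing
    // slash, then append exactly one, so configured endpoints with and
    // without a trailing slash produce the same base URL.
    normalized := fmt.Sprintf("%s/", strings.TrimSuffix(ep.String(), "/"))
    fmt.Println(normalized) // https://account-name.blob.core.windows.net/
}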


@@ -4,7 +4,6 @@

 package local

 import (
-	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -13,6 +12,7 @@ import (
 	"time"

 	"github.com/offen/docker-volume-backup/internal/storage"
+	"github.com/offen/docker-volume-backup/internal/utilities"
 )

 type localStorage struct {
@@ -127,7 +127,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 		return fmt.Errorf(
 			"(*localStorage).Prune: %d error(s) deleting local files, starting with: %w",
 			len(removeErrors),
-			errors.Join(removeErrors...),
+			utilities.Join(removeErrors...),
 		)
 	}
 	return nil


@@ -8,7 +8,6 @@ import (
 	"crypto/x509"
 	"errors"
 	"fmt"
-	"os"
 	"path"
 	"path/filepath"
 	"time"
@@ -16,6 +15,7 @@ import (
 	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
 	"github.com/offen/docker-volume-backup/internal/storage"
+	"github.com/offen/docker-volume-backup/internal/utilities"
 )

 type s3Storage struct {
@@ -23,7 +23,6 @@ type s3Storage struct {
 	client       *minio.Client
 	bucket       string
 	storageClass string
-	partSize     int64
 }

 // Config contains values that define the configuration of a S3 backend.
@@ -37,7 +36,6 @@ type Config struct {
 	RemotePath       string
 	BucketName       string
 	StorageClass     string
-	PartSize         int64
 	CACert           *x509.Certificate
 }
@@ -92,7 +90,6 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 		client:       mc,
 		bucket:       opts.BucketName,
 		storageClass: opts.StorageClass,
-		partSize:     opts.PartSize,
 	}, nil
 }
@@ -104,32 +101,16 @@ func (v *s3Storage) Name() string {
 // Copy copies the given file to the S3/Minio storage backend.
 func (b *s3Storage) Copy(file string) error {
 	_, name := path.Split(file)
-	putObjectOptions := minio.PutObjectOptions{
+	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, minio.PutObjectOptions{
 		ContentType:  "application/tar+gzip",
 		StorageClass: b.storageClass,
-	}
-	if b.partSize > 0 {
-		srcFileInfo, err := os.Stat(file)
-		if err != nil {
-			return fmt.Errorf("(*s3Storage).Copy: error reading the local file: %w", err)
-		}
-		_, partSize, _, err := minio.OptimalPartInfo(srcFileInfo.Size(), uint64(b.partSize*1024*1024))
-		if err != nil {
-			return fmt.Errorf("(*s3Storage).Copy: error computing the optimal s3 part size: %w", err)
-		}
-		putObjectOptions.PartSize = uint64(partSize)
-	}
-	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
+	}); err != nil {
 		if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
 			return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
 		}
 		return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
 	}

 	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
 	return nil
@@ -178,7 +159,7 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
 			}
 		}
 		if len(removeErrors) != 0 {
-			return errors.Join(removeErrors...)
+			return utilities.Join(removeErrors...)
 		}
 		return nil
 	}); err != nil {
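
The `AWS_PART_SIZE` support removed above converts a size configured in MB into bytes and defers to `minio.OptimalPartInfo`, which reconciles the requested part size with S3's multipart limits (at most 10,000 parts per upload — the reason the removed README text warns about providers with lower part counters). A rough sketch of that arithmetic, assuming the `minio-go/v7` module from `go.mod` (the 5 GiB object size is a made-up example):

package main

import (
    "fmt"

    "github.com/minio/minio-go/v7"
)

func main() {
    configuredPartSizeMB := int64(16)           // AWS_PART_SIZE default from the README diff
    objectSize := int64(5) * 1024 * 1024 * 1024 // hypothetical 5 GiB archive

    // OptimalPartInfo returns the part layout for a multipart upload,
    // clamping the configured part size against the provider's limits.
    totalParts, partSize, lastPartSize, err := minio.OptimalPartInfo(objectSize, uint64(configuredPartSizeMB*1024*1024))
    if err != nil {
        panic(err)
    }
    fmt.Printf("parts: %d, part size: %d bytes, last part: %d bytes\n", totalParts, partSize, lastPartSize)
}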


@@ -29,7 +29,7 @@ const (
 	LogLevelError
 )

-type Log func(logType LogLevel, context string, msg string, params ...any)
+type Log func(logType LogLevel, context string, msg string, params ...interface{})

 // PruneStats is a wrapper struct for returning stats after pruning
 type PruneStats struct {


@@ -67,17 +67,15 @@ func (b *webDavStorage) Name() string {

 // Copy copies the given file to the WebDav storage backend.
 func (b *webDavStorage) Copy(file string) error {
+	bytes, err := os.ReadFile(file)
 	_, name := path.Split(file)
+	if err != nil {
+		return fmt.Errorf("(*webDavStorage).Copy: Error reading the file to be uploaded: %w", err)
+	}
 	if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
 		return fmt.Errorf("(*webDavStorage).Copy: Error creating directory '%s' on WebDAV server: %w", b.DestinationPath, err)
 	}
-	r, err := os.Open(file)
-	if err != nil {
-		return fmt.Errorf("(*webDavStorage).Copy: Error opening the file to be uploaded: %w", err)
-	}
-	if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
+	if err := b.client.Write(filepath.Join(b.DestinationPath, name), bytes, 0644); err != nil {
 		return fmt.Errorf("(*webDavStorage).Copy: Error uploading the file to WebDAV server: %w", err)
 	}
 	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to WebDAV URL '%s' at path '%s'.", file, b.url, b.DestinationPath)


@@ -0,0 +1,24 @@
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
// SPDX-License-Identifier: MPL-2.0

package utilities

import (
	"errors"
	"strings"
)

// Join takes a list of errors and joins them into a single error
func Join(errs ...error) error {
	if len(errs) == 1 {
		return errs[0]
	}
	var msgs []string
	for _, err := range errs {
		if err == nil {
			continue
		}
		msgs = append(msgs, err.Error())
	}
	return errors.New("[" + strings.Join(msgs, ", ") + "]")
}
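
`utilities.Join` exists on this side of the comparison because `errors.Join` only ships with Go 1.20, while the right-hand `go.mod` and Dockerfile pin Go 1.19. A small usage sketch (a hypothetical call site, mirroring how `hooks.go` and the storage backends aggregate error slices):

package main

import (
    "errors"
    "fmt"

    "github.com/offen/docker-volume-backup/internal/utilities"
)

func main() {
    // Collect errors the way the Prune and hook call sites do, then
    // flatten them into a single error for wrapping with %w.
    var errs []error
    errs = append(errs, errors.New("pruning backup-a.tar.gz: access denied"))
    errs = append(errs, errors.New("pruning backup-b.tar.gz: timeout"))

    fmt.Println(utilities.Join(errs...))
    // [pruning backup-a.tar.gz: access denied, pruning backup-b.tar.gz: timeout]
}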


@@ -4,7 +4,7 @@ services:
   storage:
     image: mcr.microsoft.com/azure-storage/azurite
     volumes:
-      - azurite_backup_data:/data
+      - ./foo:/data
     command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data
     healthcheck:
       test: nc 127.0.0.1 10000 -z


@@ -6,18 +6,18 @@ cd "$(dirname "$0")"
 . ../util.sh
 current_test=$(basename $(pwd))

-docker compose up -d
+docker-compose up -d
 sleep 5

 # A symlink for a known file in the volume is created so the test can check
 # whether symlinks are preserved on backup.
-docker compose exec backup backup
+docker-compose exec backup backup
 sleep 5

 expect_running_containers "3"

-docker compose run --rm az_cli \
+docker-compose run --rm az_cli \
 	az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
 tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db
@@ -26,15 +26,15 @@ pass "Found relevant files in untared remote backups."
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
 # TODO: find out if we can test actual deletion without having to wait for a day
-BACKUP_RETENTION_DAYS="0" docker compose up -d
+BACKUP_RETENTION_DAYS="0" docker-compose up -d
 sleep 5

-docker compose exec backup backup
-docker compose run --rm az_cli \
+docker-compose exec backup backup
+docker-compose run --rm az_cli \
 	az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
 test -f ./local/test.tar.gz
 pass "Remote backups have not been deleted."

-docker compose down --volumes
+docker-compose down --volumes


@@ -24,20 +24,20 @@ openssl x509 -req -passin pass:test \
 openssl x509 -in minio.crt -noout -text

-docker compose up -d
+docker-compose up -d
 sleep 5

-docker compose exec backup backup
+docker-compose exec backup backup
 sleep 5

 expect_running_containers "3"

-docker run --rm \
+docker run --rm -it \
 	-v minio_backup_data:/minio_data \
 	alpine \
 	ash -c 'tar -xvf /minio_data/backup/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'

 pass "Found relevant files in untared remote backups."

-docker compose down --volumes
+docker-compose down --volumes


@@ -48,7 +48,7 @@ docker run --rm \
 	--entrypoint backup \
 	offen/docker-volume-backup:${TEST_VERSION:-canary}

-docker run --rm \
+docker run --rm -it \
 	-v backup_data:/data alpine \
 	ash -c 'tar -xvf /data/backup/test.tar.gz && test -f /backup/app_data/offen.db && test -d /backup/empty_data'


@@ -42,9 +42,10 @@ services:
       EXEC_LABEL: test
       EXEC_FORWARD_OUTPUT: "true"
     volumes:
-      - ./local:/archive
+      - archive:/archive
       - app_data:/backup/data:ro
      - /var/run/docker.sock:/var/run/docker.sock

 volumes:
   app_data:
+  archive:


@@ -6,12 +6,11 @@ cd $(dirname $0)
 . ../util.sh
 current_test=$(basename $(pwd))

-mkdir -p ./local
-docker compose up -d
+docker-compose up -d
 sleep 30 # mariadb likes to take a bit before responding

-docker compose exec backup backup
+docker-compose exec backup backup
+sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' commands_archive) ./local
 tar -xvf ./local/test.tar.gz

 if [ ! -f ./backup/data/dump.sql ]; then
@@ -29,13 +28,12 @@ if [ -f ./backup/data/post.txt ]; then
 fi
 pass "Did not find unexpected file."

-docker compose down --volumes
+docker-compose down --volumes
 sudo rm -rf ./local

 info "Running commands test in swarm mode next."

-mkdir -p ./local
 docker swarm init

 docker stack deploy --compose-file=docker-compose.yml test_stack
@@ -49,6 +47,8 @@ sleep 20
 docker exec $(docker ps -q -f name=backup) backup

+sudo cp -r $(docker volume inspect --format='{{ .Mountpoint }}' test_stack_archive) ./local
 tar -xvf ./local/test.tar.gz

 if [ ! -f ./backup/data/dump.sql ]; then
 	fail "Could not find file written by pre command."


@@ -8,12 +8,12 @@ current_test=$(basename $(pwd))

 mkdir -p local

-docker compose up -d
+docker-compose up -d

 # sleep until a backup is guaranteed to have happened on the 1 minute schedule
 sleep 100

-docker compose down --volumes
+docker-compose down --volumes

 if [ ! -f ./local/conf.tar.gz ]; then
 	fail "Config from file was not used."


@@ -1,4 +0,0 @@
ARG version=canary

FROM offen/docker-volume-backup:$version
RUN apk add rsync


@@ -1,26 +0,0 @@
version: '3'

services:
  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    restart: always
    labels:
      - docker-volume-backup.copy-post=/bin/sh -c 'mkdir -p /tmp/unpack && tar -xvf $$COMMAND_RUNTIME_ARCHIVE_FILEPATH -C /tmp/unpack && rsync -r /tmp/unpack/backup/app_data /local'
    environment:
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      EXEC_FORWARD_OUTPUT: "true"
    volumes:
      - ./local:/local
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:

View File

@@ -1,29 +0,0 @@
-#!/bin/sh
-set -e
-cd "$(dirname "$0")"
-. ../util.sh
-current_test=$(basename $(pwd))
-mkdir -p local
-export BASE_VERSION="${TEST_VERSION:-canary}"
-export TEST_VERSION="${TEST_VERSION:-canary}-with-rsync"
-docker build . -t offen/docker-volume-backup:$TEST_VERSION --build-arg version=$BASE_VERSION
-docker compose up -d
-sleep 5
-docker compose exec backup backup
-sleep 5
-expect_running_containers "2"
-if [ ! -f "./local/app_data/offen.db" ]; then
-  fail "Could not find expected file in untared archive."
-fi
-docker compose down --volumes
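
The fixture deleted here packs its post-processing into a single copy-post label, using $$ so that compose passes the literal dollar sign through to the container shell. Unrolled into plain shell, the hook body it executes after each backup reads roughly as follows; COMMAND_RUNTIME_ARCHIVE_FILEPATH is the archive path handed to the hook, as seen in the label above:

# Sketch: the copy-post hook from the deleted compose file, unescaped.
mkdir -p /tmp/unpack
tar -xvf "$COMMAND_RUNTIME_ARCHIVE_FILEPATH" -C /tmp/unpack
rsync -r /tmp/unpack/backup/app_data /local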

View File

@@ -8,10 +8,10 @@ current_test=$(basename $(pwd))
 mkdir -p local
-docker compose up -d
+docker-compose up -d
 sleep 5
-docker compose exec backup backup
+docker-compose exec backup backup
 expect_running_containers "2"
@@ -30,4 +30,4 @@ if [ ! -L ./local/test-latest.tar.gz.gpg ]; then
fail "Could not find local symlink to latest encrypted backup." fail "Could not find local symlink to latest encrypted backup."
fi fi
docker compose down --volumes docker-compose down --volumes

View File

@@ -8,11 +8,11 @@ current_test=$(basename $(pwd))
 mkdir -p local
-docker compose up -d
+docker-compose up -d
 sleep 5
-docker compose exec backup backup
-docker compose down --volumes
+docker-compose exec backup backup
+docker-compose down --volumes
 out=$(mktemp -d)
 sudo tar --same-owner -xvf ./local/test.tar.gz -C "$out"

View File

@@ -8,13 +8,13 @@ current_test=$(basename $(pwd))
 mkdir -p local
-docker compose up -d
+docker-compose up -d
 sleep 5
 # A symlink for a known file in the volume is created so the test can check
 # whether symlinks are preserved on backup.
-docker compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
-docker compose exec backup backup
+docker-compose exec offen ln -s /var/opt/offen/offen.db /var/opt/offen/db.link
+docker-compose exec backup backup
 sleep 5
@@ -42,14 +42,14 @@ pass "Found symlink to latest version in local backup."
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
 # TODO: find out if we can test actual deletion without having to wait for a day
-BACKUP_RETENTION_DAYS="0" docker compose up -d
+BACKUP_RETENTION_DAYS="0" docker-compose up -d
 sleep 5
-docker compose exec backup backup
+docker-compose exec backup backup
 if [ "$(find ./local -type f | wc -l)" != "1" ]; then
   fail "Backups should not have been deleted, instead seen: "$(find ./local -type f)""
 fi
 pass "Local backups have not been deleted."
-docker compose down --volumes
+docker-compose down --volumes
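
For context, retention in these tests is driven entirely by environment configuration. A minimal sketch of the relevant settings; BACKUP_RETENTION_DAYS appears in the scripts above, while the leeway variable is an assumption about the project's pruning options, not something this diff shows:

# Sketch: prune backups older than seven days (values illustrative).
export BACKUP_RETENTION_DAYS="7"
# Assumed option: extra safety margin before a file becomes eligible for pruning.
export BACKUP_PRUNING_LEEWAY="1m"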

View File

@@ -8,13 +8,13 @@ current_test=$(basename $(pwd))
 mkdir -p local
-docker compose up -d
+docker-compose up -d
 sleep 5
 GOTIFY_TOKEN=$(curl -sSLX POST -H 'Content-Type: application/json' -d '{"name":"test"}' http://admin:custom@localhost:8080/application | jq -r '.token')
 info "Set up Gotify application using token $GOTIFY_TOKEN"
-docker compose exec backup backup
+docker-compose exec backup backup
 NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
 if [ "$NUM_MESSAGES" != 0 ]; then
@@ -22,11 +22,11 @@ if [ "$NUM_MESSAGES" != 0 ]; then
 fi
 pass "No notifications were sent when not configured."
-docker compose down
-NOTIFICATION_URLS="gotify://gotify/${GOTIFY_TOKEN}?disableTLS=true" docker compose up -d
-docker compose exec backup backup
+docker-compose down
+NOTIFICATION_URLS="gotify://gotify/${GOTIFY_TOKEN}?disableTLS=true" docker-compose up -d
+docker-compose exec backup backup
 NUM_MESSAGES=$(curl -sSL http://admin:custom@localhost:8080/message | jq -r '.messages | length')
 if [ "$NUM_MESSAGES" != 1 ]; then
@@ -47,4 +47,4 @@ if [ "$MESSAGE_BODY" != "Backing up /tmp/test.tar.gz succeeded." ]; then
 fi
 pass "Custom notification body was used."
-docker compose down --volumes
+docker-compose down --volumes
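
The NOTIFICATION_URLS value exercised here uses the shoutrrr URL format, where the host segment names the Gotify instance and the path carries the application token. A sketch against an arbitrary server; host and token are placeholders:

# Sketch: route backup notifications to a Gotify server over TLS.
export NOTIFICATION_URLS="gotify://gotify.example.com/A1b2C3d4E5f6"
# The test above appends ?disableTLS=true only because its Gotify fixture runs without TLS.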

View File

@@ -9,10 +9,10 @@ current_test=$(basename $(pwd))
 mkdir -p local
-docker compose up -d
+docker-compose up -d
 sleep 5
-docker compose exec backup backup
+docker-compose exec backup backup
 tmp_dir=$(mktemp -d)
 sudo tar --same-owner -xvf ./local/backup.tar.gz -C $tmp_dir
@@ -27,4 +27,4 @@ for file in $(sudo find $tmp_dir/backup/postgres); do
 done
 pass "All files and directories in backup preserved their ownership."
-docker compose down --volumes
+docker-compose down --volumes

View File

@@ -6,18 +6,18 @@ cd "$(dirname "$0")"
 . ../util.sh
 current_test=$(basename $(pwd))
-docker compose up -d
+docker-compose up -d
 sleep 5
 # A symlink for a known file in the volume is created so the test can check
 # whether symlinks are preserved on backup.
-docker compose exec backup backup
+docker-compose exec backup backup
 sleep 5
 expect_running_containers "3"
-docker run --rm \
+docker run --rm -it \
   -v minio_backup_data:/minio_data \
   alpine \
   ash -c 'tar -xvf /minio_data/backup/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
@@ -27,16 +27,16 @@ pass "Found relevant files in untared remote backups."
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
 # TODO: find out if we can test actual deletion without having to wait for a day
-BACKUP_RETENTION_DAYS="0" docker compose up -d
+BACKUP_RETENTION_DAYS="0" docker-compose up -d
 sleep 5
-docker compose exec backup backup
-docker run --rm \
+docker-compose exec backup backup
+docker run --rm -it \
   -v minio_backup_data:/minio_data \
   alpine \
   ash -c '[ $(find /minio_data/backup/ -type f | wc -l) = "1" ]'
 pass "Remote backups have not been deleted."
-docker compose down --volumes
+docker-compose down --volumes

View File

@@ -22,7 +22,7 @@ sleep 20
 docker exec $(docker ps -q -f name=backup) backup
-docker run --rm \
+docker run --rm -it \
   -v backup_data:/data alpine \
   ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'

View File

@@ -8,16 +8,16 @@ current_test=$(basename $(pwd))
 ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
-docker compose up -d
+docker-compose up -d
 sleep 5
-docker compose exec backup backup
+docker-compose exec backup backup
 sleep 5
 expect_running_containers 3
-docker run --rm \
+docker run --rm -it \
   -v ssh_backup_data:/ssh_data \
   alpine \
   ash -c 'tar -xvf /ssh_data/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
@@ -27,17 +27,17 @@ pass "Found relevant files in decrypted and untared remote backups."
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
 # TODO: find out if we can test actual deletion without having to wait for a day
-BACKUP_RETENTION_DAYS="0" docker compose up -d
+BACKUP_RETENTION_DAYS="0" docker-compose up -d
 sleep 5
-docker compose exec backup backup
-docker run --rm \
+docker-compose exec backup backup
+docker run --rm -it \
   -v ssh_backup_data:/ssh_data \
   alpine \
   ash -c '[ $(find /ssh_data/ -type f | wc -l) = "1" ]'
 pass "Remote backups have not been deleted."
-docker compose down --volumes
+docker-compose down --volumes
 rm -f id_rsa id_rsa.pub
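
The key pair for the SSH storage test is generated in PEM format (-m pem), which Go-based SSH clients tend to parse more reliably than the newer OpenSSH key format. A standalone sketch; the passphrase and comment are the fixtures used above, and the mount path is an assumption for illustration:

# Sketch: create a PEM-formatted RSA key for the SSH storage backend.
ssh-keygen -t rsa -m pem -b 4096 -N "test1234" -f id_rsa -C "docker-volume-backup@local"
# The private key would then be mounted into the backup container,
# e.g. -v ./id_rsa:/root/.ssh/id_rsa (path assumed, not taken from this diff).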

View File

@@ -19,7 +19,7 @@ sleep 20
 docker exec $(docker ps -q -f name=backup) backup
-docker run --rm \
+docker run --rm -it \
   -v backup_data:/data alpine \
   ash -c 'tar -xf /data/backup/test.tar.gz && test -f /backup/pg_data/PG_VERSION'

View File

@@ -1,2 +0,0 @@
-local
-backup

View File

@@ -1,30 +0,0 @@
-version: '2.4'
-services:
-  alpine:
-    image: alpine:3.17.3
-    tty: true
-    volumes:
-      - app_data:/tmp
-    labels:
-      - docker-volume-backup.archive-pre.user=testuser
-      - docker-volume-backup.archive-pre=/bin/sh -c 'whoami > /tmp/whoami.txt'
-  backup:
-    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
-    deploy:
-      restart_policy:
-        condition: on-failure
-    environment:
-      BACKUP_FILENAME: test.tar.gz
-      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
-      EXEC_FORWARD_OUTPUT: "true"
-    volumes:
-      - ./local:/archive
-      - app_data:/backup/data:ro
-      - /var/run/docker.sock:/var/run/docker.sock
-volumes:
-  app_data:
-  archive:

View File

@@ -1,30 +0,0 @@
-#!/bin/sh
-set -e
-cd $(dirname $0)
-. ../util.sh
-current_test=$(basename $(pwd))
-docker compose up -d
-user_name=testuser
-docker exec user-alpine-1 adduser --disabled-password "$user_name"
-docker compose exec backup backup
-tar -xvf ./local/test.tar.gz
-if [ ! -f ./backup/data/whoami.txt ]; then
-  fail "Could not find file written by pre command."
-fi
-pass "Found expected file."
-tar -xvf ./local/test.tar.gz
-if [ "$(cat ./backup/data/whoami.txt)" != "$user_name" ]; then
-  fail "Could not find expected user name."
-fi
-pass "Found expected user."
-docker compose down --volumes
-sudo rm -rf ./local
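
The deleted fixture shows the label pair that runs an archive-pre hook as a named user: the .user suffix selects the account, and the hook runs inside the labelled container. A minimal sketch of attaching the same labels to a one-off container; the user must already exist in the container, which is why the script above calls adduser first:

# Sketch: label a container so the backup tool runs its pre-archive hook
# as "testuser" (a test fixture name) inside that container.
docker run -d --name demo \
  --label "docker-volume-backup.archive-pre.user=testuser" \
  --label "docker-volume-backup.archive-pre=/bin/sh -c 'whoami > /tmp/whoami.txt'" \
  alpine:3.17.3 tail -f /dev/null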

View File

@@ -6,16 +6,16 @@ cd "$(dirname "$0")"
 . ../util.sh
 current_test=$(basename $(pwd))
-docker compose up -d
+docker-compose up -d
 sleep 5
-docker compose exec backup backup
+docker-compose exec backup backup
 sleep 5
 expect_running_containers "3"
-docker run --rm \
+docker run --rm -it \
   -v webdav_backup_data:/webdav_data \
   alpine \
   ash -c 'tar -xvf /webdav_data/data/my/new/path/test-hostnametoken.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db'
@@ -25,16 +25,16 @@ pass "Found relevant files in untared remote backup."
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
 # TODO: find out if we can test actual deletion without having to wait for a day
-BACKUP_RETENTION_DAYS="0" docker compose up -d
+BACKUP_RETENTION_DAYS="0" docker-compose up -d
 sleep 5
-docker compose exec backup backup
-docker run --rm \
+docker-compose exec backup backup
+docker run --rm -it \
   -v webdav_backup_data:/webdav_data \
   alpine \
   ash -c '[ $(find /webdav_data/data/my/new/path/ -type f | wc -l) = "1" ]'
 pass "Remote backups have not been deleted."
-docker compose down --volumes
+docker-compose down --volumes
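
The nested remote path this test asserts on (/my/new/path) comes from the storage configuration rather than from the archive itself. As a sketch of how such a target is typically configured; the variable names are assumptions about the WebDAV backend's options, and all values are placeholders:

# Sketch: point the WebDAV storage backend at a nested remote directory.
export WEBDAV_URL="https://webdav.example.com/"
export WEBDAV_PATH="/my/new/path"
export WEBDAV_USERNAME="test"
export WEBDAV_PASSWORD="test"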