offen/docker-volume-backup (mirror of https://github.com/offen/docker-volume-backup.git)

Compare commits (36 commits)
Commits in this comparison, by short SHA:

9ba8143be2, b90fc9ea4d, e08a3303bf, 47326c7c59, 67e7288855, 1765b06835,
67d978f515, a93ff6fe09, 1c6f64e254, 085d2c5dfd, b1382dee00, c3732107b1,
d288c87c54, 47491439a1, 94f71ac765, 2addf1dd6c, c07990eaf6, a27743bd32,
9d5b897ab4, 30bf31cd90, 32e9a05b40, b302884447, b3e1ce27be, 66518ed0ff,
14d966d41a, 336dece328, dc8172b673, 5ea9a7ce15, bcffe0bc25, 144e65ce6f,
07afa53cd3, 9a07f5486b, d4c5f65f31, 5b8a484d80, 37c01a578c, 46c6441d48
.github/dependabot.yml (new file)
@@ -0,0 +1,10 @@
+version: 2
+updates:
+  - package-ecosystem: docker
+    directory: /
+    schedule:
+      interval: weekly
+  - package-ecosystem: gomod
+    directory: /
+    schedule:
+      interval: weekly
.github/workflows/test.yml
@@ -11,10 +11,20 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - uses: actions/checkout@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
       - name: Build Docker Image
+        env:
+          DOCKER_BUILDKIT: '1'
         run: docker build . -t offen/docker-volume-backup:test
       - name: Run Tests
         working-directory: ./test
         run: |
+          # Stop the buildx container so the tests can make assertions
+          # about the number of running containers
+          docker rm -f $(docker ps -aq)
           export GPG_TTY=$(tty)
           ./test.sh test
Dockerfile
@@ -1,7 +1,7 @@
 # Copyright 2021 - Offen Authors <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0

-FROM golang:1.20-alpine as builder
+FROM golang:1.21-alpine as builder

 WORKDIR /app
 COPY . .
@@ -9,15 +9,13 @@ RUN go mod download
 WORKDIR /app/cmd/backup
 RUN go build -o backup .

-FROM alpine:3.17
+FROM alpine:3.18

 WORKDIR /root

 RUN apk add --no-cache ca-certificates

 COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
-COPY ./entrypoint.sh /root/
-RUN chmod +x entrypoint.sh
+COPY --chmod=755 ./entrypoint.sh /root/

 ENTRYPOINT ["/root/entrypoint.sh"]
README.md
@@ -4,16 +4,17 @@

 # docker-volume-backup

-Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage or SSH compatible storage.
+Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage, Dropbox or SSH compatible storage.

 The [offen/docker-volume-backup](https://hub.docker.com/r/offen/docker-volume-backup) Docker image can be used as a lightweight (below 15MB) sidecar container to an existing Docker setup.
-It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV, Azure Blob Storage or SSH compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.
+It handles __recurring or one-off backups of Docker volumes__ to a __local directory__, __any S3, WebDAV, Azure Blob Storage, Dropbox or SSH compatible storage (or any combination) and rotates away old backups__ if configured. It also supports __encrypting your backups using GPG__ and __sending notifications for failed backup runs__.

 <!-- MarkdownTOC -->

 - [Quickstart](#quickstart)
   - [Recurring backups in a compose setup](#recurring-backups-in-a-compose-setup)
   - [One-off backups using Docker CLI](#one-off-backups-using-docker-cli)
+  - [Available image registries](#available-image-registries)
 - [Configuration reference](#configuration-reference)
 - [How to](#how-to)
   - [Stop containers during backup](#stop-containers-during-backup)
@@ -35,6 +36,7 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
   - [Define different retention schedules](#define-different-retention-schedules)
   - [Use special characters in notification URLs](#use-special-characters-in-notification-urls)
   - [Handle file uploads using third party tools](#handle-file-uploads-using-third-party-tools)
+  - [Setup Dropbox storage backend](#setup-dropbox-storage-backend)
 - [Recipes](#recipes)
   - [Backing up to AWS S3](#backing-up-to-aws-s3)
   - [Backing up to Filebase](#backing-up-to-filebase)
@@ -43,6 +45,7 @@ It handles __recurring or one-off backups of Docker volumes__ to a __local direc
   - [Backing up to WebDAV](#backing-up-to-webdav)
   - [Backing up to SSH](#backing-up-to-ssh)
   - [Backing up to Azure Blob Storage](#backing-up-to-azure-blob-storage)
+  - [Backing up to Dropbox](#backing-up-to-dropbox)
   - [Backing up locally](#backing-up-locally)
   - [Backing up to AWS S3 as well as locally](#backing-up-to-aws-s3-as-well-as-locally)
   - [Running on a custom cron schedule](#running-on-a-custom-cron-schedule)
@@ -121,6 +124,18 @@ docker run --rm \

 Alternatively, pass a `--env-file` in order to use a full config as described below.

+### Available image registries
+
+This Docker image is published to both Docker Hub and the GitHub container registry.
+Depending on your preferences and needs, you can reference both `offen/docker-volume-backup` as well as `ghcr.io/offen/docker-volume-backup`:
+
+```
+docker pull offen/docker-volume-backup:v2
+docker pull ghcr.io/offen/docker-volume-backup:v2
+```
+
+Documentation references Docker Hub, but all examples will work using ghcr.io just as well.
+
 ## Configuration reference

 Backup targets, schedule and retention are configured in environment variables.
@@ -135,13 +150,22 @@ You can populate below template according to your requirements and use it as you

 # BACKUP_CRON_EXPRESSION="0 2 * * *"

-# The name of the backup file including the `.tar.gz` extension.
+# The compression algorithm used in conjunction with tar.
+# Valid options are: "gz" (Gzip) and "zst" (Zstd).
+# Note that the selection affects the file extension.
+
+# BACKUP_COMPRESSION="gz"
+
+# The name of the backup file including the extension.
 # Format verbs will be replaced as in `strftime`. Omitting them
 # will result in the same filename for every backup run, which means previous
-# versions will be overwritten on subsequent runs. The default results
-# in filenames like `backup-2021-08-29T04-00-00.tar.gz`.
+# versions will be overwritten on subsequent runs.
+# Extension can be defined literally or via "{{ .Extension }}" template,
+# in which case it will become either "tar.gz" or "tar.zst" (depending
+# on your BACKUP_COMPRESSION setting).
+# The default results in filenames like: `backup-2021-08-29T04-00-00.tar.gz`.

-# BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.tar.gz"
+# BACKUP_FILENAME="backup-%Y-%m-%dT%H-%M-%S.{{ .Extension }}"

 # Setting BACKUP_FILENAME_EXPAND to true allows for environment variable
 # placeholders in BACKUP_FILENAME, BACKUP_LATEST_SYMLINK and in
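
For illustration only, here is a minimal standalone Go sketch of how a `{{ .Extension }}` placeholder like the one above can be resolved with the standard `text/template` package. The variable names are hypothetical; this mirrors the documented behavior rather than quoting the project's implementation:

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// Hypothetical stand-ins for BACKUP_FILENAME and BACKUP_COMPRESSION.
	filename := "backup-%Y-%m-%dT%H-%M-%S.{{ .Extension }}"
	compression := "zst"

	tmpl, err := template.New("extension").Parse(filename)
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	// The extension resolves to "tar.gz" or "tar.zst" depending on the setting;
	// the strftime verbs are left intact for later expansion.
	if err := tmpl.Execute(&buf, map[string]string{
		"Extension": fmt.Sprintf("tar.%s", compression),
	}); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // backup-%Y-%m-%dT%H-%M-%S.tar.zst
}
```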
@@ -247,6 +271,15 @@ You can populate below template according to your requirements and use it as you

 # AWS_STORAGE_CLASS="GLACIER"

+# Setting this variable will change the S3 default part size for the copy step.
+# This value is useful when you want to upload large files.
+# NB: When using Scaleway as S3 provider, be aware that the parts counter is set to 1,000,
+# while MinIO uses a hard-coded value of 10,000. As a workaround, try setting a higher value.
+# Defaults to "16" (MB) if unset (taken from MinIO); set this value according to your needs.
+# The unit is MB and the value must be an integer.
+
+# AWS_PART_SIZE=16
+
 # You can also backup files to any WebDAV server:

 # The URL of the remote WebDAV server
@@ -325,6 +358,26 @@ You can populate below template according to your requirements and use it as you

 # AZURE_STORAGE_ENDPOINT="https://{{ .AccountName }}.blob.core.windows.net/"

+# Absolute remote path in your Dropbox where the backups shall be stored.
+# Note: Use your app's subpath in Dropbox if it doesn't have global access.
+# Consult the README for further information.
+
+# DROPBOX_REMOTE_PATH="/my/directory"
+
+# Number of concurrent chunked uploads for Dropbox.
+# Values above 6 usually result in no enhancements.
+
+# DROPBOX_CONCURRENCY_LEVEL="6"
+
+# App key and app secret from your app created at https://www.dropbox.com/developers/apps/info
+
+# DROPBOX_APP_KEY=""
+# DROPBOX_APP_SECRET=""
+
+# Refresh token to request new short-lived tokens (OAuth2). Consult the README to see how to get one.
+
+# DROPBOX_REFRESH_TOKEN=""
+
 # In addition to storing backups remotely, you can also keep local copies.
 # Pass a container-local path to store your backups if needed. You also need to
 # mount a local folder or Docker volume into that location (`/archive`
@@ -411,7 +464,7 @@ You can populate below template according to your requirements and use it as you

 # Notifications (email, Slack, etc.) can be sent out when a backup run finishes.
 # Configuration is provided as a comma-separated list of URLs as consumed
-# by `shoutrrr`: https://containrrr.dev/shoutrrr/v0.5/services/overview/
+# by `shoutrrr`: https://containrrr.dev/shoutrrr/0.7/services/overview/
 # The content of such notifications can be customized. Dedicated documentation
 # on how to do this can be found in the README. When providing multiple URLs or
 # an URL that contains a comma, the values can be URL encoded to avoid ambiguities.
@@ -555,7 +608,7 @@ services:

 Notification backends other than email are also supported.
 Refer to the documentation of [shoutrrr][shoutrrr-docs] to find out about options and configuration.

-[shoutrrr-docs]: https://containrrr.dev/shoutrrr/v0.5/services/overview/
+[shoutrrr-docs]: https://containrrr.dev/shoutrrr/0.7/services/overview/

 ### Customize notifications
@@ -644,7 +697,23 @@ volumes:

 The backup procedure is guaranteed to wait for all `pre` or `post` commands to finish before proceeding.
 However there are no guarantees about the order in which they are run, which could also happen concurrently.

-By default the backup command is executed by the root user. It is possible to specify a custom user in container labels with the format `docker-volume-backup.[step]-[pre|post]-[user]`. The option will allow you to run a specific step command by specified user. Make sure the user exists and present in `passwd` inside the target container.
+By default the backup command is executed by the user provided by the container's image.
+It is possible to specify a custom user that is used to run commands in dedicated labels with the format `docker-volume-backup.[step]-[pre|post].user`:
+
+```yml
+version: '3'
+
+services:
+  gitea:
+    image: gitea/gitea
+    volumes:
+      - backup_data:/tmp
+    labels:
+      - docker-volume-backup.archive-pre.user=git
+      - docker-volume-backup.archive-pre=/bin/bash -c 'cd /tmp; /usr/local/bin/gitea dump -c /data/gitea/conf/app.ini -R -f dump.zip'
+```
+
+Make sure the user exists and is present in `passwd` inside the target container.

 ### Encrypting your backup using GPG
@@ -890,7 +959,7 @@ BACKUP_SOURCES=/backup/app2_data

 If you want to manage backup retention on different schedules, the most straight forward approach is to define a dedicated configuration for retention rule using a different prefix in the `BACKUP_FILENAME` parameter and then run them on different cron schedules.

-For example, if you wanted to keep daily backups for 7 days, weekly backups for a month, and retain monthly backups forever, you could create three configuration files and mount them into `/etc/dockervolumebackup.d`:
+For example, if you wanted to keep daily backups for 7 days, weekly backups for a month, and retain monthly backups forever, you could create three configuration files and mount them into `/etc/dockervolumebackup/conf.d`:

 ```ini
 # 01daily.conf
@@ -973,6 +1042,37 @@ volumes:

 Commands will be invoked with the filepath of the tar archive passed as `COMMAND_RUNTIME_BACKUP_FILEPATH`.

+### Setup Dropbox storage backend
+
+#### Auth-Setup:
+
+1. Create a new Dropbox App in the [App Console](https://www.dropbox.com/developers/apps)
+2. Open your new Dropbox App and set `DROPBOX_APP_KEY` and `DROPBOX_APP_SECRET` in your environment (e.g. docker-compose.yml) accordingly
+3. Click on `Permissions` in your app and make sure that at least the following permissions are granted:
+   - `files.metadata.write`
+   - `files.metadata.read`
+   - `files.content.write`
+   - `files.content.read`
+4. Replace APPKEY in `https://www.dropbox.com/oauth2/authorize?client_id=APPKEY&token_access_type=offline&response_type=code` with the app key from step 2
+5. Visit the URL and confirm the access of your app. This gives you an `auth code` -> save it somewhere!
+6. Replace AUTHCODE, APPKEY, APPSECRET accordingly and perform the request:
+```
+curl https://api.dropbox.com/oauth2/token \
+  -d code=AUTHCODE \
+  -d grant_type=authorization_code \
+  -d client_id=APPKEY \
+  -d client_secret=APPSECRET
+```
+7. Execute the request. You will get a JSON formatted reply. Use the value of the `refresh_token` for the last environment variable `DROPBOX_REFRESH_TOKEN`
+8. You should now have `DROPBOX_APP_KEY`, `DROPBOX_APP_SECRET` and `DROPBOX_REFRESH_TOKEN` set. These don't expire.
+
+Note: Using the "Generated access token" in the app console is not supported, as it is only very short lived and therefore not suitable for an automatic backup solution. The refresh token handles this automatically - the setup procedure above is only needed once.
+
+#### Other parameters
+
+Important: If you chose `App folder` access during the creation of your Dropbox app in step 1 above, you can only write in the app's directory!
+This means that `DROPBOX_REMOTE_PATH` must start with e.g. `/Apps/YOUR_APP_NAME` or `/Apps/YOUR_APP_NAME/some_sub_dir`
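
For readers who prefer scripting the token exchange, the same refresh-token flow can be sketched in Go. This is an illustration under the assumption that Dropbox's documented `oauth2/token` endpoint accepts the standard `refresh_token` grant; all credential values are placeholders:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Placeholder credentials - substitute the values obtained in the setup above.
	form := url.Values{
		"grant_type":    {"refresh_token"},
		"refresh_token": {"YOUR_REFRESH_TOKEN"},
		"client_id":     {"YOUR_APP_KEY"},
		"client_secret": {"YOUR_APP_SECRET"},
	}
	resp, err := http.PostForm("https://api.dropbox.com/oauth2/token", form)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	// The JSON reply contains a short-lived "access_token" field.
	fmt.Println(string(body))
}
```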

 ## Recipes

 This section lists configuration for some real-world use cases that you can mix and match according to your needs.
@@ -1140,6 +1240,30 @@ volumes:
   data:
 ```

+### Backing up to Dropbox
+
+See [Dropbox Setup](#setup-dropbox-storage-backend) on how to get the appropriate environment values.
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:v2
+    environment:
+      DROPBOX_REFRESH_TOKEN: REFRESH_KEY # replace
+      DROPBOX_APP_KEY: APP_KEY # replace
+      DROPBOX_APP_SECRET: APP_SECRET # replace
+      DROPBOX_REMOTE_PATH: /Apps/my-test-app/some_subdir # replace
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+  data:
+```
+
 ### Backing up locally

 ```yml
cmd/backup/archive.go
@@ -15,9 +15,11 @@ import (
 	"path"
 	"path/filepath"
 	"strings"
+
+	"github.com/klauspost/compress/zstd"
 )

-func createArchive(files []string, inputFilePath, outputFilePath string) error {
+func createArchive(files []string, inputFilePath, outputFilePath string, compression string) error {
 	inputFilePath = stripTrailingSlashes(inputFilePath)
 	inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
 	if err != nil {
@@ -27,7 +29,7 @@ func createArchive(files []string, inputFilePath, outputFilePath string) error {
 		return fmt.Errorf("createArchive: error creating output file path: %w", err)
 	}

-	if err := compress(files, outputFilePath, filepath.Dir(inputFilePath)); err != nil {
+	if err := compress(files, outputFilePath, filepath.Dir(inputFilePath), compression); err != nil {
 		return fmt.Errorf("createArchive: error creating archive: %w", err)
 	}
@@ -51,18 +53,30 @@ func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error)
 	return inputFilePath, outputFilePath, err
 }

-func compress(paths []string, outFilePath, subPath string) error {
+func compress(paths []string, outFilePath, subPath string, algo string) error {
 	file, err := os.Create(outFilePath)
+	var compressWriter io.WriteCloser
 	if err != nil {
 		return fmt.Errorf("compress: error creating out file: %w", err)
 	}

 	prefix := path.Dir(outFilePath)
-	gzipWriter := gzip.NewWriter(file)
-	tarWriter := tar.NewWriter(gzipWriter)
+	switch algo {
+	case "gz":
+		compressWriter = gzip.NewWriter(file)
+	case "zst":
+		compressWriter, err = zstd.NewWriter(file)
+		if err != nil {
+			return fmt.Errorf("compress: zstd error: %w", err)
+		}
+	default:
+		return fmt.Errorf("compress: unsupported compression algorithm: %s", algo)
+	}
+
+	tarWriter := tar.NewWriter(compressWriter)

 	for _, p := range paths {
-		if err := writeTarGz(p, tarWriter, prefix); err != nil {
+		if err := writeTarball(p, tarWriter, prefix); err != nil {
 			return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
 		}
 	}
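
The change above swaps the hard-coded gzip writer for an `io.WriteCloser` chosen by algorithm. As a minimal standalone sketch of the same technique, using the `klauspost/compress` module the diff adds (function and file names here are hypothetical, not from the repository):

```go
package main

import (
	"archive/tar"
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

// writeTarZst writes a single file into a .tar.zst archive, mirroring
// the writer-swapping approach from the diff above.
func writeTarZst(srcPath, outPath string) error {
	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer out.Close()

	// For "gz" this would simply be gzip.NewWriter(out) instead.
	zw, err := zstd.NewWriter(out)
	if err != nil {
		return err
	}
	tw := tar.NewWriter(zw)

	info, err := os.Stat(srcPath)
	if err != nil {
		return err
	}
	header, err := tar.FileInfoHeader(info, "")
	if err != nil {
		return err
	}
	if err := tw.WriteHeader(header); err != nil {
		return err
	}
	src, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer src.Close()
	if _, err := io.Copy(tw, src); err != nil {
		return err
	}
	// Close the tar writer first, then the compressor,
	// so all trailing frames get flushed to disk.
	if err := tw.Close(); err != nil {
		return err
	}
	return zw.Close()
}

func main() {
	if err := writeTarZst("example.txt", "example.tar.zst"); err != nil {
		panic(err)
	}
}
```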
@@ -72,9 +86,9 @@ func compress(paths []string, outFilePath, subPath string) error {
 		return fmt.Errorf("compress: error closing tar writer: %w", err)
 	}

-	err = gzipWriter.Close()
+	err = compressWriter.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing gzip writer: %w", err)
+		return fmt.Errorf("compress: error closing compression writer: %w", err)
 	}

 	err = file.Close()
@@ -85,10 +99,10 @@ func compress(paths []string, outFilePath, subPath string) error {
 	return nil
 }

-func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
+func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
 	fileInfo, err := os.Lstat(path)
 	if err != nil {
-		return fmt.Errorf("writeTarGz: error getting file infor for %s: %w", path, err)
+		return fmt.Errorf("writeTarball: error getting file infor for %s: %w", path, err)
 	}

 	if fileInfo.Mode()&os.ModeSocket == os.ModeSocket {
@@ -99,19 +113,19 @@ func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {
 	if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
 		var err error
 		if link, err = os.Readlink(path); err != nil {
-			return fmt.Errorf("writeTarGz: error resolving symlink %s: %w", path, err)
+			return fmt.Errorf("writeTarball: error resolving symlink %s: %w", path, err)
 		}
 	}

 	header, err := tar.FileInfoHeader(fileInfo, link)
 	if err != nil {
-		return fmt.Errorf("writeTarGz: error getting file info header: %w", err)
+		return fmt.Errorf("writeTarball: error getting file info header: %w", err)
 	}
 	header.Name = strings.TrimPrefix(path, prefix)

 	err = tarWriter.WriteHeader(header)
 	if err != nil {
-		return fmt.Errorf("writeTarGz: error writing file info header: %w", err)
+		return fmt.Errorf("writeTarball: error writing file info header: %w", err)
 	}

 	if !fileInfo.Mode().IsRegular() {
@@ -120,13 +134,13 @@ func writeTarGz(path string, tarWriter *tar.Writer, prefix string) error {

 	file, err := os.Open(path)
 	if err != nil {
-		return fmt.Errorf("writeTarGz: error opening %s: %w", path, err)
+		return fmt.Errorf("writeTarball: error opening %s: %w", path, err)
 	}
 	defer file.Close()

 	_, err = io.Copy(tarWriter, file)
 	if err != nil {
-		return fmt.Errorf("writeTarGz: error copying %s to tar writer: %w", path, err)
+		return fmt.Errorf("writeTarball: error copying %s to tar writer: %w", path, err)
 	}

 	return nil
cmd/backup/config.go
@@ -10,64 +10,74 @@ import (
 	"io/ioutil"
 	"os"
 	"regexp"
+	"strconv"
 	"time"
 )

 // Config holds all configuration values that are expected to be set
 // by users.
 type Config struct {
 	AwsS3BucketName               string          `split_words:"true"`
 	AwsS3Path                     string          `split_words:"true"`
 	AwsEndpoint                   string          `split_words:"true" default:"s3.amazonaws.com"`
 	AwsEndpointProto              string          `split_words:"true" default:"https"`
 	AwsEndpointInsecure           bool            `split_words:"true"`
 	AwsEndpointCACert             CertDecoder     `envconfig:"AWS_ENDPOINT_CA_CERT"`
 	AwsStorageClass               string          `split_words:"true"`
 	AwsAccessKeyID                string          `envconfig:"AWS_ACCESS_KEY_ID"`
 	AwsAccessKeyIDFile            string          `envconfig:"AWS_ACCESS_KEY_ID_FILE"`
 	AwsSecretAccessKey            string          `split_words:"true"`
 	AwsSecretAccessKeyFile        string          `split_words:"true"`
 	AwsIamRoleEndpoint            string          `split_words:"true"`
+	AwsPartSize                   int64           `split_words:"true"`
+	BackupCompression             CompressionType `split_words:"true" default:"gz"`
 	BackupSources                 string          `split_words:"true" default:"/backup"`
-	BackupFilename                string          `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"`
+	BackupFilename                string          `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.{{ .Extension }}"`
 	BackupFilenameExpand          bool            `split_words:"true"`
 	BackupLatestSymlink           string          `split_words:"true"`
 	BackupArchive                 string          `split_words:"true" default:"/archive"`
 	BackupRetentionDays           int32           `split_words:"true" default:"-1"`
 	BackupPruningLeeway           time.Duration   `split_words:"true" default:"1m"`
 	BackupPruningPrefix           string          `split_words:"true"`
 	BackupStopContainerLabel      string          `split_words:"true" default:"true"`
 	BackupFromSnapshot            bool            `split_words:"true"`
 	BackupExcludeRegexp           RegexpDecoder   `split_words:"true"`
 	GpgPassphrase                 string          `split_words:"true"`
 	NotificationURLs              []string        `envconfig:"NOTIFICATION_URLS"`
 	NotificationLevel             string          `split_words:"true" default:"error"`
 	EmailNotificationRecipient    string          `split_words:"true"`
 	EmailNotificationSender       string          `split_words:"true" default:"noreply@nohost"`
 	EmailSMTPHost                 string          `envconfig:"EMAIL_SMTP_HOST"`
 	EmailSMTPPort                 int             `envconfig:"EMAIL_SMTP_PORT" default:"587"`
 	EmailSMTPUsername             string          `envconfig:"EMAIL_SMTP_USERNAME"`
 	EmailSMTPPassword             string          `envconfig:"EMAIL_SMTP_PASSWORD"`
 	WebdavUrl                     string          `split_words:"true"`
 	WebdavUrlInsecure             bool            `split_words:"true"`
 	WebdavPath                    string          `split_words:"true" default:"/"`
 	WebdavUsername                string          `split_words:"true"`
 	WebdavPassword                string          `split_words:"true"`
 	SSHHostName                   string          `split_words:"true"`
 	SSHPort                       string          `split_words:"true" default:"22"`
 	SSHUser                       string          `split_words:"true"`
 	SSHPassword                   string          `split_words:"true"`
 	SSHIdentityFile               string          `split_words:"true" default:"/root/.ssh/id_rsa"`
 	SSHIdentityPassphrase         string          `split_words:"true"`
 	SSHRemotePath                 string          `split_words:"true"`
 	ExecLabel                     string          `split_words:"true"`
 	ExecForwardOutput             bool            `split_words:"true"`
 	LockTimeout                   time.Duration   `split_words:"true" default:"60m"`
 	AzureStorageAccountName       string          `split_words:"true"`
 	AzureStoragePrimaryAccountKey string          `split_words:"true"`
 	AzureStorageContainerName     string          `split_words:"true"`
 	AzureStoragePath              string          `split_words:"true"`
 	AzureStorageEndpoint          string          `split_words:"true" default:"https://{{ .AccountName }}.blob.core.windows.net/"`
+	DropboxEndpoint               string          `split_words:"true" default:"https://api.dropbox.com/"`
+	DropboxOAuth2Endpoint         string          `envconfig:"DROPBOX_OAUTH2_ENDPOINT" default:"https://api.dropbox.com/"`
+	DropboxRefreshToken           string          `split_words:"true"`
+	DropboxAppKey                 string          `split_words:"true"`
+	DropboxAppSecret              string          `split_words:"true"`
+	DropboxRemotePath             string          `split_words:"true"`
+	DropboxConcurrencyLevel       NaturalNumber   `split_words:"true" default:"6"`
 }

 func (c *Config) resolveSecret(envVar string, secretPath string) (string, error) {
@@ -81,6 +91,22 @@ func (c *Config) resolveSecret(envVar string, secretPath string) (string, error)
 	return string(data), nil
 }

+type CompressionType string
+
+func (c *CompressionType) Decode(v string) error {
+	switch v {
+	case "gz", "zst":
+		*c = CompressionType(v)
+		return nil
+	default:
+		return fmt.Errorf("config: error decoding compression type %s", v)
+	}
+}
+
+func (c *CompressionType) String() string {
+	return string(*c)
+}
+
 type CertDecoder struct {
 	Cert *x509.Certificate
 }
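
`CompressionType` satisfies the decoder interface of `kelseyhightower/envconfig`, so an invalid value fails at startup rather than deep inside a backup run. A minimal sketch of that pattern with a hypothetical custom type (`Color` and the `COLOR` variable are illustrations, not part of the project):

```go
package main

import (
	"fmt"
	"os"

	"github.com/kelseyhightower/envconfig"
)

// Color is a hypothetical custom type; envconfig calls Decode
// whenever a field of this type is populated from the environment.
type Color string

func (c *Color) Decode(v string) error {
	switch v {
	case "red", "green":
		*c = Color(v)
		return nil
	default:
		return fmt.Errorf("unsupported color %s", v)
	}
}

type spec struct {
	Color Color `split_words:"true" default:"red"`
}

func main() {
	os.Setenv("COLOR", "blue")
	var s spec
	// Fails fast with "unsupported color blue" instead of carrying
	// an invalid value into the rest of the program.
	if err := envconfig.Process("", &s); err != nil {
		fmt.Println(err)
	}
}
```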
@@ -117,3 +143,21 @@ func (r *RegexpDecoder) Decode(v string) error {
 	*r = RegexpDecoder{Re: re}
 	return nil
 }
+
+type NaturalNumber int
+
+func (n *NaturalNumber) Decode(v string) error {
+	asInt, err := strconv.Atoi(v)
+	if err != nil {
+		return fmt.Errorf("config: error converting %s to int", v)
+	}
+	if asInt <= 0 {
+		return fmt.Errorf("config: expected a natural number, got %d", asInt)
+	}
+	*n = NaturalNumber(asInt)
+	return nil
+}
+
+func (n *NaturalNumber) Int() int {
+	return int(*n)
+}
cmd/backup/exec.go
@@ -91,7 +91,6 @@ func (s *script) runLabeledCommands(label string) error {
 		})
 	}
 	containersWithCommand, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
-		Quiet:   true,
 		Filters: filters.NewArgs(f...),
 	})
 	if err != nil {
@@ -105,7 +104,6 @@ func (s *script) runLabeledCommands(label string) error {
 		Value: "docker-volume-backup.exec-pre",
 	}
 	deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
-		Quiet:   true,
 		Filters: filters.NewArgs(f...),
 	})
 	if err != nil {
@@ -123,7 +121,6 @@ func (s *script) runLabeledCommands(label string) error {
 		Value: "docker-volume-backup.exec-post",
 	}
 	deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
-		Quiet:   true,
 		Filters: filters.NewArgs(f...),
 	})
 	if err != nil {
@@ -163,7 +160,7 @@ func (s *script) runLabeledCommands(label string) error {
 	userLabelName := fmt.Sprintf("%s.user", label)
 	user := c.Labels[userLabelName]

-	s.logger.Infof("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/"))
+	s.logger.Info(fmt.Sprintf("Running %s command %s for container %s", label, cmd, strings.TrimPrefix(c.Names[0], "/")))
 	stdout, stderr, err := s.exec(c.ID, cmd, user)
 	if s.c.ExecForwardOutput {
 		os.Stderr.Write(stderr)
cmd/backup/lock.go
@@ -41,9 +41,11 @@ func (s *script) lock(lockfile string) (func() error, error) {
 	}

 	if !s.encounteredLock {
-		s.logger.Infof(
-			"Exclusive lock was not available on first attempt. Will retry until it becomes available or the timeout of %s is exceeded.",
-			s.c.LockTimeout,
+		s.logger.Info(
+			fmt.Sprintf(
+				"Exclusive lock was not available on first attempt. Will retry until it becomes available or the timeout of %s is exceeded.",
+				s.c.LockTimeout,
+			),
 		)
 		s.encounteredLock = true
 	}
cmd/backup/main.go
@@ -4,6 +4,7 @@
 package main

 import (
+	"fmt"
 	"os"
 )

@@ -21,7 +22,9 @@ func main() {
 	if pArg := recover(); pArg != nil {
 		if err, ok := pArg.(error); ok {
 			if hookErr := s.runHooks(err); hookErr != nil {
-				s.logger.Errorf("An error occurred calling the registered hooks: %s", hookErr)
+				s.logger.Error(
+					fmt.Sprintf("An error occurred calling the registered hooks: %s", hookErr),
+				)
 			}
 			os.Exit(1)
 		}
@@ -29,9 +32,12 @@ func main() {
 	}

 	if err := s.runHooks(nil); err != nil {
-		s.logger.Errorf(
-			"Backup procedure ran successfully, but an error ocurred calling the registered hooks: %v",
-			err,
+		s.logger.Error(
+			fmt.Sprintf(
+				"Backup procedure ran successfully, but an error ocurred calling the registered hooks: %v",
+				err,
+			),
 		)
 		os.Exit(1)
 	}
cmd/backup/script.go
@@ -4,11 +4,13 @@
 package main

 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
 	"io"
 	"io/fs"
+	"log/slog"
 	"os"
 	"path"
 	"path/filepath"
@@ -17,6 +19,7 @@ import (

 	"github.com/offen/docker-volume-backup/internal/storage"
 	"github.com/offen/docker-volume-backup/internal/storage/azure"
+	"github.com/offen/docker-volume-backup/internal/storage/dropbox"
 	"github.com/offen/docker-volume-backup/internal/storage/local"
 	"github.com/offen/docker-volume-backup/internal/storage/s3"
 	"github.com/offen/docker-volume-backup/internal/storage/ssh"
@@ -25,13 +28,13 @@ import (
 	"github.com/containrrr/shoutrrr"
 	"github.com/containrrr/shoutrrr/pkg/router"
 	"github.com/docker/docker/api/types"
+	ctr "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/client"
 	"github.com/kelseyhightower/envconfig"
 	"github.com/leekchan/timeutil"
 	"github.com/otiai10/copy"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/crypto/openpgp"
 	"golang.org/x/sync/errgroup"
 )
@@ -41,7 +44,7 @@ import (
 type script struct {
 	cli      *client.Client
 	storages []storage.Backend
-	logger   *logrus.Logger
+	logger   *slog.Logger
 	sender   *router.ServiceRouter
 	template *template.Template
 	hooks    []hook
@@ -62,22 +65,18 @@ type script struct {
 func newScript() (*script, error) {
 	stdOut, logBuffer := buffer(os.Stdout)
 	s := &script{
 		c: &Config{},
-		logger: &logrus.Logger{
-			Out:       stdOut,
-			Formatter: new(logrus.TextFormatter),
-			Hooks:     make(logrus.LevelHooks),
-			Level:     logrus.InfoLevel,
-		},
+		logger: slog.New(slog.NewTextHandler(stdOut, nil)),
 		stats: &Stats{
 			StartTime: time.Now(),
 			LogOutput: logBuffer,
 			Storages: map[string]StorageStats{
 				"S3":      {},
 				"WebDAV":  {},
 				"SSH":     {},
 				"Local":   {},
 				"Azure":   {},
+				"Dropbox": {},
 			},
 		},
 	}
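
The diff replaces `logrus` with Go 1.21's standard `log/slog` throughout, which is why every `Infof`/`Errorf` call becomes `Info`/`Error` wrapped around `fmt.Sprintf`. A minimal sketch of the replacement pattern, assuming any `io.Writer` as the log sink:

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	// Equivalent of the diff's slog.New(slog.NewTextHandler(stdOut, nil)):
	// a text handler with default options writing to the given sink.
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

	// slog has no Infof; printf-style messages are either pre-formatted
	// (as the diff does with fmt.Sprintf) or passed as key-value attributes.
	logger.Info("backup finished", "files", 42, "storage", "S3")
}
```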
@@ -93,6 +92,20 @@ func newScript() (*script, error) {
 	}

 	s.file = path.Join("/tmp", s.c.BackupFilename)
+
+	tmplFileName, tErr := template.New("extension").Parse(s.file)
+	if tErr != nil {
+		return nil, fmt.Errorf("newScript: unable to parse backup file extension template: %w", tErr)
+	}
+
+	var bf bytes.Buffer
+	if tErr := tmplFileName.Execute(&bf, map[string]string{
+		"Extension": fmt.Sprintf("tar.%s", s.c.BackupCompression),
+	}); tErr != nil {
+		return nil, fmt.Errorf("newScript: error executing backup file extension template: %w", tErr)
+	}
+	s.file = bf.String()
+
 	if s.c.BackupFilenameExpand {
 		s.file = os.ExpandEnv(s.file)
 		s.c.BackupLatestSymlink = os.ExpandEnv(s.c.BackupLatestSymlink)
@@ -113,12 +126,11 @@ func newScript() (*script, error) {
 	logFunc := func(logType storage.LogLevel, context string, msg string, params ...any) {
 		switch logType {
 		case storage.LogLevelWarning:
-			s.logger.Warnf("["+context+"] "+msg, params...)
+			s.logger.Warn(fmt.Sprintf("["+context+"] "+msg, params...))
 		case storage.LogLevelError:
-			s.logger.Errorf("["+context+"] "+msg, params...)
-		case storage.LogLevelInfo:
+			s.logger.Error(fmt.Sprintf("["+context+"] "+msg, params...))
 		default:
-			s.logger.Infof("["+context+"] "+msg, params...)
+			s.logger.Info(fmt.Sprintf("["+context+"] "+msg, params...))
 		}
 	}
@@ -142,9 +154,10 @@ func newScript() (*script, error) {
 		BucketName:   s.c.AwsS3BucketName,
 		StorageClass: s.c.AwsStorageClass,
 		CACert:       s.c.AwsEndpointCACert.Cert,
+		PartSize:     s.c.AwsPartSize,
 	}
 	if s3Backend, err := s3.NewStorageBackend(s3Config, logFunc); err != nil {
-		return nil, err
+		return nil, fmt.Errorf("newScript: error creating s3 storage backend: %w", err)
 	} else {
 		s.storages = append(s.storages, s3Backend)
 	}
@@ -159,7 +172,7 @@ func newScript() (*script, error) {
 		RemotePath: s.c.WebdavPath,
 	}
 	if webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc); err != nil {
-		return nil, err
+		return nil, fmt.Errorf("newScript: error creating webdav storage backend: %w", err)
 	} else {
 		s.storages = append(s.storages, webdavBackend)
 	}
@@ -176,7 +189,7 @@ func newScript() (*script, error) {
 		RemotePath: s.c.SSHRemotePath,
 	}
 	if sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc); err != nil {
-		return nil, err
+		return nil, fmt.Errorf("newScript: error creating ssh storage backend: %w", err)
 	} else {
 		s.storages = append(s.storages, sshBackend)
 	}
@@ -201,11 +214,28 @@ func newScript() (*script, error) {
 		}
 		azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("newScript: error creating azure storage backend: %w", err)
 		}
 		s.storages = append(s.storages, azureBackend)
 	}

+	if s.c.DropboxRefreshToken != "" && s.c.DropboxAppKey != "" && s.c.DropboxAppSecret != "" {
+		dropboxConfig := dropbox.Config{
+			Endpoint:         s.c.DropboxEndpoint,
+			OAuth2Endpoint:   s.c.DropboxOAuth2Endpoint,
+			RefreshToken:     s.c.DropboxRefreshToken,
+			AppKey:           s.c.DropboxAppKey,
+			AppSecret:        s.c.DropboxAppSecret,
+			RemotePath:       s.c.DropboxRemotePath,
+			ConcurrencyLevel: s.c.DropboxConcurrencyLevel.Int(),
+		}
+		dropboxBackend, err := dropbox.NewStorageBackend(dropboxConfig, logFunc)
+		if err != nil {
+			return nil, fmt.Errorf("newScript: error creating dropbox storage backend: %w", err)
+		}
+		s.storages = append(s.storages, dropboxBackend)
+	}
+
 	if s.c.EmailNotificationRecipient != "" {
 		emailURL := fmt.Sprintf(
 			"smtp://%s:%s@%s:%d/?from=%s&to=%s",
@@ -280,9 +310,7 @@ func (s *script) stopContainers() (func() error, error) {
 		return noop, nil
 	}

-	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
-		Quiet: true,
-	})
+	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
 	if err != nil {
 		return noop, fmt.Errorf("stopContainers: error querying for containers: %w", err)
 	}
@@ -292,7 +320,6 @@ func (s *script) stopContainers() (func() error, error) {
 		s.c.BackupStopContainerLabel,
 	)
 	containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
-		Quiet: true,
 		Filters: filters.NewArgs(filters.KeyValuePair{
 			Key:   "label",
 			Value: containerLabel,
@@ -307,17 +334,19 @@ func (s *script) stopContainers() (func() error, error) {
 		return noop, nil
 	}

-	s.logger.Infof(
-		"Stopping %d container(s) labeled `%s` out of %d running container(s).",
-		len(containersToStop),
-		containerLabel,
-		len(allContainers),
+	s.logger.Info(
+		fmt.Sprintf(
+			"Stopping %d container(s) labeled `%s` out of %d running container(s).",
+			len(containersToStop),
+			containerLabel,
+			len(allContainers),
+		),
 	)

 	var stoppedContainers []types.Container
 	var stopErrors []error
 	for _, container := range containersToStop {
-		if err := s.cli.ContainerStop(context.Background(), container.ID, nil); err != nil {
+		if err := s.cli.ContainerStop(context.Background(), container.ID, ctr.StopOptions{}); err != nil {
 			stopErrors = append(stopErrors, err)
 		} else {
 			stoppedContainers = append(stoppedContainers, container)
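
The `ContainerStop` change tracks the newer Docker SDK signature, which takes a `container.StopOptions` value (aliased as `ctr` in the diff) instead of a `*time.Duration`. A hedged sketch of a call with an explicit timeout, assuming a client configured from the environment:

```go
package main

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// stopWithTimeout stops a container, giving it 10 seconds to shut down.
// A nil Timeout would fall back to the daemon's default grace period.
func stopWithTimeout(cli *client.Client, id string) error {
	timeout := 10 // seconds
	return cli.ContainerStop(context.Background(), id, container.StopOptions{
		Timeout: &timeout,
	})
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	// "some-container-id" is a placeholder.
	_ = stopWithTimeout(cli, "some-container-id")
}
```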
@@ -366,7 +395,7 @@ func (s *script) stopContainers() (func() error, error) {
 			if serviceMatch.ID == "" {
 				return fmt.Errorf("stopContainers: couldn't find service with name %s", serviceName)
 			}
-			serviceMatch.Spec.TaskTemplate.ForceUpdate = 1
+			serviceMatch.Spec.TaskTemplate.ForceUpdate += 1
 			if _, err := s.cli.ServiceUpdate(
 				context.Background(), serviceMatch.ID,
 				serviceMatch.Version, serviceMatch.Spec, types.ServiceUpdateOptions{},
@@ -383,9 +412,11 @@ func (s *script) stopContainers() (func() error, error) {
 			errors.Join(restartErrors...),
 		)
 	}
-	s.logger.Infof(
-		"Restarted %d container(s) and the matching service(s).",
-		len(stoppedContainers),
+	s.logger.Info(
+		fmt.Sprintf(
+			"Restarted %d container(s) and the matching service(s).",
+			len(stoppedContainers),
+		),
 	)
 	return nil
 }, stopError
@@ -409,7 +440,9 @@ func (s *script) createArchive() error {
 		if err := remove(backupSources); err != nil {
 			return fmt.Errorf("createArchive: error removing snapshot: %w", err)
 		}
-		s.logger.Infof("Removed snapshot `%s`.", backupSources)
+		s.logger.Info(
+			fmt.Sprintf("Removed snapshot `%s`.", backupSources),
+		)
 		return nil
 	})
 	if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
@@ -418,7 +451,9 @@ func (s *script) createArchive() error {
 	}); err != nil {
 		return fmt.Errorf("createArchive: error creating snapshot: %w", err)
 	}
-	s.logger.Infof("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources)
+	s.logger.Info(
+		fmt.Sprintf("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources),
+	)
 	}

 	tarFile := s.file
@@ -426,7 +461,9 @@ func (s *script) createArchive() error {
 	if err := remove(tarFile); err != nil {
 		return fmt.Errorf("createArchive: error removing tar file: %w", err)
 	}
-	s.logger.Infof("Removed tar file `%s`.", tarFile)
+	s.logger.Info(
+		fmt.Sprintf("Removed tar file `%s`.", tarFile),
+	)
 	return nil
 })
@@ -450,11 +487,13 @@ func (s *script) createArchive() error {
 		return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
 	}

-	if err := createArchive(filesEligibleForBackup, backupSources, tarFile); err != nil {
+	if err := createArchive(filesEligibleForBackup, backupSources, tarFile, s.c.BackupCompression.String()); err != nil {
 		return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
 	}

-	s.logger.Infof("Created backup of `%s` at `%s`.", backupSources, tarFile)
+	s.logger.Info(
+		fmt.Sprintf("Created backup of `%s` at `%s`.", backupSources, tarFile),
+	)
 	return nil
 }
@@ -471,7 +510,9 @@ func (s *script) encryptArchive() error {
|
|||||||
if err := remove(gpgFile); err != nil {
|
if err := remove(gpgFile); err != nil {
|
||||||
return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
|
return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
|
||||||
}
|
}
|
||||||
s.logger.Infof("Removed GPG file `%s`.", gpgFile)
|
s.logger.Info(
|
||||||
|
fmt.Sprintf("Removed GPG file `%s`.", gpgFile),
|
||||||
|
)
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -501,7 +542,9 @@ func (s *script) encryptArchive() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
s.file = gpgFile
|
s.file = gpgFile
|
||||||
s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
|
s.logger.Info(
|
||||||
|
fmt.Sprintf("Encrypted backup using given passphrase, saving as `%s`.", s.file),
|
||||||
|
)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -573,7 +616,9 @@ func (s *script) pruneBackups() error {
|
|||||||
// is non-nil.
|
// is non-nil.
|
||||||
func (s *script) must(err error) {
|
func (s *script) must(err error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Errorf("Fatal error running backup: %s", err)
|
s.logger.Error(
|
||||||
|
fmt.Sprintf("Fatal error running backup: %s", err),
|
||||||
|
)
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
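The logging changes in these hunks all follow one pattern: logrus-style printf helpers (Infof, Errorf) are replaced by plain Info/Error calls that receive a preformatted string. This is consistent with a move to the standard library's log/slog (the go.mod changes below bump the module to Go 1.21, which introduced slog, and demote logrus to an indirect dependency). A minimal sketch of the new call shape, assuming log/slog:

package main

import (
	"fmt"
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	path := "/tmp/snapshot" // hypothetical value, for illustration only
	// logrus offered logger.Infof("Removed snapshot `%s`.", path);
	// slog has no printf variants, so the message is formatted up front:
	logger.Info(fmt.Sprintf("Removed snapshot `%s`.", path))
}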
73
go.mod
@@ -1,68 +1,69 @@
 module github.com/offen/docker-volume-backup

-go 1.19
+go 1.21

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1
-	github.com/containrrr/shoutrrr v0.6.0
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0
+	github.com/containrrr/shoutrrr v0.7.1
 	github.com/cosiner/argv v0.1.0
-	github.com/docker/docker v20.10.24+incompatible
+	github.com/docker/docker v24.0.5+incompatible
 	github.com/gofrs/flock v0.8.1
 	github.com/kelseyhightower/envconfig v1.4.0
+	github.com/klauspost/compress v1.16.7
 	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
-	github.com/minio/minio-go/v7 v7.0.44
-	github.com/otiai10/copy v1.10.0
-	github.com/pkg/sftp v1.13.5
-	github.com/sirupsen/logrus v1.9.0
-	github.com/studio-b12/gowebdav v0.0.0-20220128162035-c7b1ff8a5e62
-	golang.org/x/crypto v0.3.0
-	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
+	github.com/minio/minio-go/v7 v7.0.62
+	github.com/otiai10/copy v1.11.0
+	github.com/pkg/sftp v1.13.6
+	github.com/studio-b12/gowebdav v0.9.0
+	golang.org/x/crypto v0.12.0
+	golang.org/x/sync v0.3.0
 )

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
+)
+
+require (
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/docker/distribution v2.8.0+incompatible // indirect
+	github.com/docker/distribution v2.8.2+incompatible // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
-	github.com/dustin/go-humanize v1.0.0 // indirect
-	github.com/fatih/color v1.10.0 // indirect
+	github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
+	github.com/dustin/go-humanize v1.0.1 // indirect
+	github.com/fatih/color v1.13.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/go-cmp v0.5.6 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.15.12 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.1 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
 	github.com/kr/fs v0.1.0 // indirect
-	github.com/kr/text v0.2.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
-	github.com/mattn/go-colorable v0.1.8 // indirect
-	github.com/mattn/go-isatty v0.0.12 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.16 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/sha256-simd v1.0.0 // indirect
+	github.com/minio/sha256-simd v1.0.1 // indirect
 	github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
-	github.com/onsi/gomega v1.10.3 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
-	github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
+	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/rs/xid v1.4.0 // indirect
-	golang.org/x/net v0.7.0 // indirect
-	golang.org/x/sys v0.7.0 // indirect
-	golang.org/x/text v0.7.0 // indirect
-	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
-	google.golang.org/protobuf v1.28.0 // indirect
+	github.com/rs/xid v1.5.0 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
+	golang.org/x/net v0.14.0 // indirect
+	golang.org/x/sys v0.11.0 // indirect
+	golang.org/x/text v0.12.0 // indirect
 	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
 	gotest.tools/v3 v3.0.3 // indirect
 )
260
internal/storage/dropbox/dropbox.go
Normal file
@@ -0,0 +1,260 @@
package dropbox

import (
	"bytes"
	"context"
	"fmt"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
	"github.com/offen/docker-volume-backup/internal/storage"
	"golang.org/x/oauth2"
)

type dropboxStorage struct {
	*storage.StorageBackend
	client           files.Client
	concurrencyLevel int
}

// Config allows configuring a Dropbox storage backend.
type Config struct {
	Endpoint         string
	OAuth2Endpoint   string
	RefreshToken     string
	AppKey           string
	AppSecret        string
	RemotePath       string
	ConcurrencyLevel int
}

// NewStorageBackend creates and initializes a new Dropbox storage backend.
func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
	tokenUrl, _ := url.JoinPath(opts.OAuth2Endpoint, "oauth2/token")

	conf := &oauth2.Config{
		ClientID:     opts.AppKey,
		ClientSecret: opts.AppSecret,
		Endpoint: oauth2.Endpoint{
			TokenURL: tokenUrl,
		},
	}

	logFunc(storage.LogLevelInfo, "Dropbox", "Fetching fresh access token for Dropbox storage backend.")
	tkSource := conf.TokenSource(context.Background(), &oauth2.Token{RefreshToken: opts.RefreshToken})
	token, err := tkSource.Token()
	if err != nil {
		return nil, fmt.Errorf("(*dropboxStorage).NewStorageBackend: Error refreshing token: %w", err)
	}

	dbxConfig := dropbox.Config{
		Token: token.AccessToken,
	}

	if opts.Endpoint != "https://api.dropbox.com/" {
		dbxConfig.URLGenerator = func(hostType string, namespace string, route string) string {
			return fmt.Sprintf("%s/%d/%s/%s", opts.Endpoint, 2, namespace, route)
		}
	}

	client := files.New(dbxConfig)

	if opts.ConcurrencyLevel < 1 {
		logFunc(storage.LogLevelWarning, "Dropbox", "Concurrency level must be at least 1! Using 1 instead of %d.", opts.ConcurrencyLevel)
		opts.ConcurrencyLevel = 1
	}

	return &dropboxStorage{
		StorageBackend: &storage.StorageBackend{
			DestinationPath: opts.RemotePath,
			Log:             logFunc,
		},
		client:           client,
		concurrencyLevel: opts.ConcurrencyLevel,
	}, nil
}

// Name returns the name of the storage backend
func (b *dropboxStorage) Name() string {
	return "Dropbox"
}

// Copy copies the given file to the Dropbox storage backend.
func (b *dropboxStorage) Copy(file string) error {
	_, name := path.Split(file)

	folderArg := files.NewCreateFolderArg(b.DestinationPath)
	if _, err := b.client.CreateFolderV2(folderArg); err != nil {
		switch err := err.(type) {
		case files.CreateFolderV2APIError:
			if err.EndpointError.Path.Tag != files.WriteErrorConflict {
				return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s' in Dropbox: %w", b.DestinationPath, err)
			}
			b.Log(storage.LogLevelInfo, b.Name(), "Destination path '%s' already exists in Dropbox, no new directory required.", b.DestinationPath)
		default:
			return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s' in Dropbox: %w", b.DestinationPath, err)
		}
	}

	r, err := os.Open(file)
	if err != nil {
		return fmt.Errorf("(*dropboxStorage).Copy: Error opening the file to be uploaded: %w", err)
	}
	defer r.Close()

	// Start new upload session and get session id

	b.Log(storage.LogLevelInfo, b.Name(), "Starting upload session for backup '%s' to Dropbox at path '%s'.", file, b.DestinationPath)

	var sessionId string
	uploadSessionStartArg := files.NewUploadSessionStartArg()
	uploadSessionStartArg.SessionType = &files.UploadSessionType{Tagged: dropbox.Tagged{Tag: files.UploadSessionTypeConcurrent}}
	if res, err := b.client.UploadSessionStart(uploadSessionStartArg, nil); err != nil {
		return fmt.Errorf("(*dropboxStorage).Copy: Error starting the upload session: %w", err)
	} else {
		sessionId = res.SessionId
	}

	// Send the file in 148MB chunks (Dropbox API limit is 150MB, concurrent upload requires a multiple of 4MB though)
	// Last append can be any size <= 150MB with Close=True

	const chunkSize = 148 * 1024 * 1024 // 148MB
	var offset uint64 = 0
	var guard = make(chan struct{}, b.concurrencyLevel)
	var errorChn = make(chan error, b.concurrencyLevel)
	var EOFChn = make(chan bool, b.concurrencyLevel)
	var mu sync.Mutex
	var wg sync.WaitGroup

loop:
	for {
		guard <- struct{}{} // limit concurrency
		select {
		case err := <-errorChn: // error from goroutine
			return err
		case <-EOFChn: // EOF from goroutine
			wg.Wait() // wait for all goroutines to finish
			break loop
		default:
		}

		wg.Add(1) // register before spawning so wg.Wait cannot race the Add
		go func() {
			defer func() {
				wg.Done()
				<-guard
			}()
			chunk := make([]byte, chunkSize)

			mu.Lock() // to preserve offset of chunks

			select {
			case <-EOFChn:
				EOFChn <- true // put it back for outer loop
				mu.Unlock()
				return // already EOF
			default:
			}

			bytesRead, err := r.Read(chunk)
			if err != nil {
				errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error reading the file to be uploaded: %w", err)
				mu.Unlock()
				return
			}
			chunk = chunk[:bytesRead]

			uploadSessionAppendArg := files.NewUploadSessionAppendArg(
				files.NewUploadSessionCursor(sessionId, offset),
			)
			isEOF := bytesRead < chunkSize
			uploadSessionAppendArg.Close = isEOF
			if isEOF {
				EOFChn <- true
			}
			offset += uint64(bytesRead)

			mu.Unlock()

			if err := b.client.UploadSessionAppendV2(uploadSessionAppendArg, bytes.NewReader(chunk)); err != nil {
				errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error appending the file to the upload session: %w", err)
				return
			}
		}()
	}

	// Finish the upload session, commit the file (no new data added)

	_, err = b.client.UploadSessionFinish(
		files.NewUploadSessionFinishArg(
			files.NewUploadSessionCursor(sessionId, 0),
			files.NewCommitInfo(filepath.Join(b.DestinationPath, name)),
		), nil)
	if err != nil {
		return fmt.Errorf("(*dropboxStorage).Copy: Error finishing the upload session: %w", err)
	}

	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to Dropbox at path '%s'.", file, b.DestinationPath)

	return nil
}

// Prune rotates away backups according to the configuration and provided deadline for the Dropbox storage backend.
func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
	var entries []files.IsMetadata
	res, err := b.client.ListFolder(files.NewListFolderArg(b.DestinationPath))
	if err != nil {
		return nil, fmt.Errorf("(*dropboxStorage).Prune: Error looking up candidates from remote storage: %w", err)
	}
	entries = append(entries, res.Entries...)

	for res.HasMore {
		res, err = b.client.ListFolderContinue(files.NewListFolderContinueArg(res.Cursor))
		if err != nil {
			return nil, fmt.Errorf("(*dropboxStorage).Prune: Error looking up candidates from remote storage: %w", err)
		}
		entries = append(entries, res.Entries...)
	}

	var matches []*files.FileMetadata
	var lenCandidates int
	for _, candidate := range entries {
		switch candidate := candidate.(type) {
		case *files.FileMetadata:
			if !strings.HasPrefix(candidate.Name, pruningPrefix) {
				continue
			}
			lenCandidates++
			if candidate.ServerModified.Before(deadline) {
				matches = append(matches, candidate)
			}
		default:
			continue
		}
	}

	stats := &storage.PruneStats{
		Total:  uint(lenCandidates),
		Pruned: uint(len(matches)),
	}

	if err := b.DoPrune(b.Name(), len(matches), lenCandidates, "Dropbox backup(s)", func() error {
		for _, match := range matches {
			if _, err := b.client.DeleteV2(files.NewDeleteArg(filepath.Join(b.DestinationPath, match.Name))); err != nil {
				return fmt.Errorf("(*dropboxStorage).Prune: Error removing file from Dropbox storage: %w", err)
			}
		}
		return nil
	}); err != nil {
		return stats, err
	}

	return stats, nil
}
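A note on the chunk size chosen above: for concurrent upload sessions, every append except the last must be a multiple of 4 MiB, and a single call may carry at most 150 MB; 148 MiB is the largest 4 MiB multiple that fits when the limit is read in the same 1024-based units the code uses. A small stand-alone check of that arithmetic (illustration only, not part of the commit):

package main

import "fmt"

func main() {
	const mib = 1024 * 1024
	const chunkSize = 148 * mib // the constant used in dropbox.go above
	fmt.Println("multiple of 4 MiB:", chunkSize%(4*mib) == 0) // true
	fmt.Println("within the limit:", chunkSize <= 150*mib)    // true
}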
@@ -8,6 +8,7 @@ import (
 	"crypto/x509"
 	"errors"
 	"fmt"
+	"os"
 	"path"
 	"path/filepath"
 	"time"
@@ -22,6 +23,7 @@ type s3Storage struct {
 	client       *minio.Client
 	bucket       string
 	storageClass string
+	partSize     int64
 }

 // Config contains values that define the configuration of a S3 backend.
@@ -35,6 +37,7 @@ type Config struct {
 	RemotePath   string
 	BucketName   string
 	StorageClass string
+	PartSize     int64
 	CACert       *x509.Certificate
 }

@@ -89,6 +92,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 		client:       mc,
 		bucket:       opts.BucketName,
 		storageClass: opts.StorageClass,
+		partSize:     opts.PartSize,
 	}, nil
 }

@@ -100,16 +104,32 @@ func (v *s3Storage) Name() string {
 // Copy copies the given file to the S3/Minio storage backend.
 func (b *s3Storage) Copy(file string) error {
 	_, name := path.Split(file)
-	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, minio.PutObjectOptions{
+	putObjectOptions := minio.PutObjectOptions{
 		ContentType:  "application/tar+gzip",
 		StorageClass: b.storageClass,
-	}); err != nil {
+	}
+
+	if b.partSize > 0 {
+		srcFileInfo, err := os.Stat(file)
+		if err != nil {
+			return fmt.Errorf("(*s3Storage).Copy: error reading the local file: %w", err)
+		}
+
+		_, partSize, _, err := minio.OptimalPartInfo(srcFileInfo.Size(), uint64(b.partSize*1024*1024))
+		if err != nil {
+			return fmt.Errorf("(*s3Storage).Copy: error computing the optimal s3 part size: %w", err)
+		}
+
+		putObjectOptions.PartSize = uint64(partSize)
+	}
+
+	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
 		if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
 			return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode)
 		}
 		return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
 	}

 	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)

 	return nil
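For context on why the configured part size goes through minio.OptimalPartInfo instead of being used verbatim: S3 multipart uploads are limited to 10,000 parts, so a fixed part size implicitly caps the largest object that can be uploaded, and a helper like the one called above reconciles the configured size with those constraints for the actual object size. A rough illustration of the ceiling, assuming the standard 10,000-part limit (not part of the diff):

package main

import "fmt"

func main() {
	const maxParts = 10000
	partSizeMiB := int64(16) // hypothetical configured value
	maxObjectBytes := int64(maxParts) * partSizeMiB * 1024 * 1024
	fmt.Printf("16 MiB parts cap a single upload at %d GiB\n", maxObjectBytes/(1024*1024*1024))
}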
@@ -17,7 +17,7 @@ sleep 5

 expect_running_containers "3"

-docker-compose run --rm az_cli \
+docker compose run --rm az_cli \
   az storage blob download -f /dump/test.tar.gz -c test-container -n path/to/backup/test.tar.gz
 tar -xvf ./local/test.tar.gz -C /tmp && test -f /tmp/backup/app_data/offen.db

@@ -26,7 +26,7 @@ pass "Found relevant files in untared remote backups."
 # The second part of this test checks if backups get deleted when the retention
 # is set to 0 days (which it should not as it would mean all backups get deleted)
 # TODO: find out if we can test actual deletion without having to wait for a day
-BACKUP_RETENTION_DAYS="0" docker-compose up -d
+BACKUP_RETENTION_DAYS="0" docker compose up -d
 sleep 5

 docker compose exec backup backup
66
test/cli-zstd/run.sh
Executable file
@@ -0,0 +1,66 @@
#!/bin/sh

set -e

cd $(dirname $0)
. ../util.sh
current_test=$(basename $(pwd))

docker network create test_network
docker volume create backup_data
docker volume create app_data
# This volume is created to test whether empty directories are handled
# correctly. It is not supposed to hold any data.
docker volume create empty_data

docker run -d \
  --name minio \
  --network test_network \
  --env MINIO_ROOT_USER=test \
  --env MINIO_ROOT_PASSWORD=test \
  --env MINIO_ACCESS_KEY=test \
  --env MINIO_SECRET_KEY=GMusLtUmILge2by+z890kQ \
  -v backup_data:/data \
  minio/minio:RELEASE.2020-08-04T23-10-51Z server /data

docker exec minio mkdir -p /data/backup

docker run -d \
  --name offen \
  --network test_network \
  -v app_data:/var/opt/offen/ \
  offen/offen:latest

sleep 10

docker run --rm \
  --network test_network \
  -v app_data:/backup/app_data \
  -v empty_data:/backup/empty_data \
  -v /var/run/docker.sock:/var/run/docker.sock \
  --env AWS_ACCESS_KEY_ID=test \
  --env AWS_SECRET_ACCESS_KEY=GMusLtUmILge2by+z890kQ \
  --env AWS_ENDPOINT=minio:9000 \
  --env AWS_ENDPOINT_PROTO=http \
  --env AWS_S3_BUCKET_NAME=backup \
  --env BACKUP_COMPRESSION=zst \
  --env BACKUP_FILENAME='test.{{ .Extension }}' \
  --env "BACKUP_FROM_SNAPSHOT=true" \
  --entrypoint backup \
  offen/docker-volume-backup:${TEST_VERSION:-canary}

# Have to install tar and zstd on Alpine because the plain image comes with a
# very basic tar from busybox which does not seem to support zstd
docker run --rm \
  -v backup_data:/data alpine \
  ash -c 'apk add --no-cache zstd tar && tar -xvf /data/backup/test.tar.zst --zstd && test -f /backup/app_data/offen.db && test -d /backup/empty_data'

pass "Found relevant files in untared remote backup."

# This test does not stop containers during backup. This happens on purpose
# in order to cover this setup as well.
expect_running_containers "2"

docker rm $(docker stop minio offen)
docker volume rm backup_data app_data
docker network rm test_network
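The BACKUP_FILENAME value in the script above uses Go template syntax, and the .Extension placeholder tracks the configured compression, which is why BACKUP_COMPRESSION=zst yields the test.tar.zst file the tar/zstd assertion extracts. A minimal stand-alone sketch of that substitution; the template data shown here is hypothetical, not the project's actual struct:

package main

import (
	"os"
	"text/template"
)

func main() {
	tmpl := template.Must(template.New("filename").Parse("test.{{ .Extension }}"))
	// With BACKUP_COMPRESSION=zst the archive extension is "tar.zst",
	// so this prints "test.tar.zst".
	if err := tmpl.Execute(os.Stdout, struct{ Extension string }{"tar.zst"}); err != nil {
		panic(err)
	}
}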
57
test/dropbox/docker-compose.yml
Normal file
@@ -0,0 +1,57 @@
version: '3'

services:
  openapi_mock:
    image: muonsoft/openapi-mock
    environment:
      OPENAPI_MOCK_USE_EXAMPLES: if_present
      OPENAPI_MOCK_SPECIFICATION_URL: '/etc/openapi/user_v2.yaml'
    ports:
      - 8080:8080
    volumes:
      - ./user_v2.yaml:/etc/openapi/user_v2.yaml

  oauth2_mock:
    image: ghcr.io/navikt/mock-oauth2-server:1.0.0
    ports:
      - 8090:8090
    environment:
      PORT: 8090
      JSON_CONFIG_PATH: '/etc/oauth2/config.json'
    volumes:
      - ./oauth2_config.json:/etc/oauth2/config.json

  backup:
    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
    hostname: hostnametoken
    depends_on:
      - openapi_mock
      - oauth2_mock
    restart: always
    environment:
      BACKUP_FILENAME_EXPAND: 'true'
      BACKUP_FILENAME: test-$$HOSTNAME.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
      BACKUP_PRUNING_LEEWAY: 5s
      BACKUP_PRUNING_PREFIX: test
      DROPBOX_ENDPOINT: http://openapi_mock:8080
      DROPBOX_OAUTH2_ENDPOINT: http://oauth2_mock:8090
      DROPBOX_REFRESH_TOKEN: test
      DROPBOX_APP_KEY: test
      DROPBOX_APP_SECRET: test
      DROPBOX_REMOTE_PATH: /test
      DROPBOX_CONCURRENCY_LEVEL: 6
    volumes:
      - app_data:/backup/app_data:ro
      - /var/run/docker.sock:/var/run/docker.sock

  offen:
    image: offen/offen:latest
    labels:
      - docker-volume-backup.stop-during-backup=true
    volumes:
      - app_data:/var/opt/offen

volumes:
  app_data:
37
test/dropbox/oauth2_config.json
Normal file
@@ -0,0 +1,37 @@
{
  "interactiveLogin": true,
  "httpServer": "NettyWrapper",
  "tokenCallbacks": [
    {
      "issuerId": "issuer1",
      "tokenExpiry": 120,
      "requestMappings": [
        {
          "requestParam": "scope",
          "match": "scope1",
          "claims": {
            "sub": "subByScope",
            "aud": [
              "audByScope"
            ]
          }
        }
      ]
    },
    {
      "issuerId": "issuer2",
      "requestMappings": [
        {
          "requestParam": "someparam",
          "match": "somevalue",
          "claims": {
            "sub": "subBySomeParam",
            "aud": [
              "audBySomeParam"
            ]
          }
        }
      ]
    }
  ]
}
40
test/dropbox/run.sh
Normal file
@@ -0,0 +1,40 @@
#!/bin/sh

set -e

cd "$(dirname "$0")"
. ../util.sh
current_test=$(basename $(pwd))

docker compose up -d
sleep 5

logs=$(docker compose exec -T backup backup)

sleep 5

expect_running_containers "4"

echo "$logs"
if echo "$logs" | grep -q "ERROR"; then
  fail "Backup failed, errors reported: $logs"
else
  pass "Backup succeeded, no errors reported."
fi

# The second part of this test checks if backups get deleted when the retention
# is set to 0 days (which it should not, as it would mean all backups get deleted)
# TODO: find out if we can test actual deletion without having to wait for a day
BACKUP_RETENTION_DAYS="0" docker compose up -d
sleep 5

logs=$(docker compose exec -T backup backup)

echo "$logs"
if echo "$logs" | grep -q "Refusing to do so, please check your configuration"; then
  pass "Remote backups have not been deleted."
else
  fail "Remote backups would have been deleted: $logs"
fi

docker compose down --volumes
12758
test/dropbox/user_v2.yaml
Normal file
File diff suppressed because it is too large