Mirror of https://github.com/offen/docker-volume-backup.git (synced 2025-12-05 17:18:02 +01:00)

Compare commits: v2.37.0-al...v2.42.0 (83 commits)
Commits (SHA1):
8a64da4b0b
f97ce11734
336e12f874
016c6c8307
e22f317fbb
e04bd2f066
c4eeaad813
5840f1c5dc
d71b7304c2
fbc7f85d9f
2af5bdf4d9
631ca3e07d
3d35d7c00e
954bde73fb
ab46e96706
ab4ce94534
e4170addb6
b8410bbdc5
24e1341589
3d0286472b
bb11ae035b
9209037ed9
2e73dea4f7
7dc3ae17e7
9d5ea718a0
272495ae7d
8beb28d4f8
0ec2e68076
b85afa6008
4cb47a4818
9b5ba8958d
0327701e2d
58f26ba004
f62ef6e05a
40924434e4
e613f6046f
292d47eb19
7637975e3f
c47a14c53a
9f795761d6
f2ef48803c
8b69566291
bf79c913e0
2f7193aa9b
550c4f520f
1af472077c
a077f12c11
cb5a38a1b7
b8995dbc51
baf34ec1f7
e8562b1785
5d7451410b
440bcf76ce
2d3e79cf5e
5abfe5bb39
6c8b0ccce5
f4c61125af
9b768c71e6
e8307a2b5b
060a6daa7a
4b3ca2ebb0
02ba9939a2
911fc5a223
f64aaa6e24
dd8ff5ee0c
52c22a1891
83fa0aae48
c4e480dcfd
a01fc3df3f
37f9bd9a8f
fb4663b087
0fe983dfcc
5c8bc107de
9a1e885138
241b5d2f25
aab47509d9
9b52c1f63e
164d6df3b4
4c74313222
de03d4f704
65626dd3d4
69eceb3982
1d45062100
.github/workflows/deploy-docs.yml (vendored): 3 lines changed

@@ -3,6 +3,9 @@ name: Deploy Documenation site to GitHub Pages
 on:
   push:
     branches: ['main']
+    paths:
+      - 'docs/**'
+      - '.github/workflows/deploy-docs.yml'
   workflow_dispatch:

 permissions:
.github/workflows/golangci-lint.yml (vendored): 4 lines changed

@@ -18,7 +18,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
         with:
-          go-version: '1.21'
+          go-version: '1.22'
           cache: false
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v3
@@ -26,7 +26,7 @@ jobs:
           # Require: The version of golangci-lint to use.
           # When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version.
           # When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit.
-          version: v1.54
+          version: v1.57

           # Optional: working directory, useful for monorepos
           # working-directory: somedir
.github/workflows/release.yml (vendored): 39 lines changed

@@ -15,6 +15,38 @@ jobs:
       - name: Check out the repo
         uses: actions/checkout@v4

+      - name: set Environment Variables
+        id: env
+        run: |
+          echo "NOW=$(date +'%F %Z %T')" >> $GITHUB_ENV
+
+      - name: Docker meta
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          # list of Docker images to use as base name for tags
+          images: |
+            offen/docker-volume-backup
+            ghcr.io/offen/docker-volume-backup
+          # define global behaviour for tags
+          flavor: |
+            latest=false
+          # specify one tag which never gets set, to prevent the tag-attribute being empty, as it will fallback to a default
+          tags: |
+            # output v2.42.1-alpha.1 (incl. pre-releases)
+            type=semver,pattern=v{{version}},enable=false
+          labels: |
+            org.opencontainers.image.title=${{github.event.repository.name}}
+            org.opencontainers.image.description=Backup Docker volumes locally or to any S3, WebDAV, Azure Blob Storage, Dropbox or SSH compatible storage
+            org.opencontainers.image.vendor=${{github.repository_owner}}
+            org.opencontainers.image.licenses=MPL-2.0
+            org.opencontainers.image.version=${{github.ref_name}}
+            org.opencontainers.image.created=${{ env.NOW }}
+            org.opencontainers.image.source=${{github.server_url}}/${{github.repository}}
+            org.opencontainers.image.revision=${{github.sha}}
+            org.opencontainers.image.url=https://offen.github.io/docker-volume-backup/
+            org.opencontainers.image.documentation=https://offen.github.io/docker-volume-backup/
+
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2

@@ -35,7 +67,7 @@ jobs:
           password: ${{ secrets.GITHUB_TOKEN }}

       - name: Extract Docker tags
-        id: meta
+        id: tags
         run: |
           version_tag="${{github.ref_name}}"
           tags=($version_tag)
@@ -51,9 +83,10 @@ jobs:
           echo "releases=$releases" >> "$GITHUB_OUTPUT"

       - name: Build and push Docker images
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v5
         with:
           context: .
           push: true
           platforms: linux/amd64,linux/arm64,linux/arm/v7
-          tags: ${{ steps.meta.outputs.releases }}
+          tags: ${{ steps.tags.outputs.releases }}
+          labels: ${{ steps.meta.outputs.labels }}
.github/workflows/unit.yml (vendored, new file): 21 lines

@@ -0,0 +1,21 @@
+name: Run Unit Tests
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+
+jobs:
+  build:
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: '1.22.x'
+      - name: Install dependencies
+        run: go mod download
+      - name: Test with the Go CLI
+        run: go test -v ./...
Dockerfile

@@ -1,7 +1,7 @@
-# Copyright 2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2022 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: MPL-2.0

-FROM golang:1.21-alpine as builder
+FROM golang:1.22-alpine as builder

 WORKDIR /app
 COPY . .
@@ -9,11 +9,12 @@ RUN go mod download
 WORKDIR /app/cmd/backup
 RUN go build -o backup .

-FROM alpine:3.19
+FROM alpine:3.20

 WORKDIR /root

-RUN apk add --no-cache ca-certificates
+RUN apk add --no-cache ca-certificates && \
+  chmod a+rw /var/lock

 COPY --from=builder /app/cmd/backup/backup /usr/bin/backup
README.md

@@ -1,5 +1,5 @@
-<a href="https://www.offen.dev/">
-  <img src="https://offen.github.io/press-kit/offen-material/gfx-GitHub-Offen-logo.svg" alt="Offen logo" title="Offen" width="150px"/>
+<a href="https://www.offen.software/">
+  <img src="https://offen.github.io/press-kit/avatars/avatar-OS-header.svg" alt="offen.software logo" title="offen.software" width="60px"/>
 </a>

 # docker-volume-backup
@@ -77,3 +77,8 @@ docker run --rm \
 ```

 Alternatively, pass a `--env-file` in order to use a full config as described below.
+
+---
+
+Copyright © 2024 <a target="_blank" href="https://www.offen.software">offen.software</a> and contributors.
+Distributed under the <a href="https://github.com/offen/docker-volume-backup/tree/main/LICENSE">MPL-2.0 License</a>.
cmd/backup/archive.go

@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 // Portions of this file are taken from package `targz`, Copyright (c) 2014 Fredrik Wallgren
@@ -16,23 +16,22 @@ import (
 	"runtime"
 	"strings"

-	"github.com/klauspost/pgzip"
-
 	"github.com/klauspost/compress/zstd"
+	"github.com/klauspost/pgzip"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )

 func createArchive(files []string, inputFilePath, outputFilePath string, compression string, compressionConcurrency int) error {
-	inputFilePath = stripTrailingSlashes(inputFilePath)
-	inputFilePath, outputFilePath, err := makeAbsolute(inputFilePath, outputFilePath)
+	_, outputFilePath, err := makeAbsolute(stripTrailingSlashes(inputFilePath), outputFilePath)
 	if err != nil {
-		return fmt.Errorf("createArchive: error transposing given file paths: %w", err)
+		return errwrap.Wrap(err, "error transposing given file paths")
 	}
 	if err := os.MkdirAll(filepath.Dir(outputFilePath), 0755); err != nil {
-		return fmt.Errorf("createArchive: error creating output file path: %w", err)
+		return errwrap.Wrap(err, "error creating output file path")
 	}

-	if err := compress(files, outputFilePath, filepath.Dir(inputFilePath), compression, compressionConcurrency); err != nil {
-		return fmt.Errorf("createArchive: error creating archive: %w", err)
+	if err := compress(files, outputFilePath, compression, compressionConcurrency); err != nil {
+		return errwrap.Wrap(err, "error creating archive")
 	}

 	return nil
@@ -55,38 +54,38 @@ func makeAbsolute(inputFilePath, outputFilePath string) (string, string, error)
 	return inputFilePath, outputFilePath, err
 }

-func compress(paths []string, outFilePath, subPath string, algo string, concurrency int) error {
+func compress(paths []string, outFilePath, algo string, concurrency int) error {
 	file, err := os.Create(outFilePath)
 	if err != nil {
-		return fmt.Errorf("compress: error creating out file: %w", err)
+		return errwrap.Wrap(err, "error creating out file")
 	}

 	prefix := path.Dir(outFilePath)
 	compressWriter, err := getCompressionWriter(file, algo, concurrency)
 	if err != nil {
-		return fmt.Errorf("compress: error getting compression writer: %w", err)
+		return errwrap.Wrap(err, "error getting compression writer")
 	}
 	tarWriter := tar.NewWriter(compressWriter)

 	for _, p := range paths {
 		if err := writeTarball(p, tarWriter, prefix); err != nil {
-			return fmt.Errorf("compress: error writing %s to archive: %w", p, err)
+			return errwrap.Wrap(err, fmt.Sprintf("error writing %s to archive", p))
 		}
 	}

 	err = tarWriter.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing tar writer: %w", err)
+		return errwrap.Wrap(err, "error closing tar writer")
 	}

 	err = compressWriter.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing compression writer: %w", err)
+		return errwrap.Wrap(err, "error closing compression writer")
 	}

 	err = file.Close()
 	if err != nil {
-		return fmt.Errorf("compress: error closing file: %w", err)
+		return errwrap.Wrap(err, "error closing file")
 	}

 	return nil
@@ -94,10 +93,12 @@ func compress(paths []string, outFilePath, subPath string, algo string, concurrency int) error {

 func getCompressionWriter(file *os.File, algo string, concurrency int) (io.WriteCloser, error) {
 	switch algo {
+	case "none":
+		return &passThroughWriteCloser{file}, nil
 	case "gz":
 		w, err := pgzip.NewWriterLevel(file, 5)
 		if err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: gzip error: %w", err)
+			return nil, errwrap.Wrap(err, "gzip error")
 		}

 		if concurrency == 0 {
@@ -105,25 +106,25 @@ func getCompressionWriter(file *os.File, algo string, concurrency int) (io.WriteCloser, error) {
 		}

 		if err := w.SetConcurrency(1<<20, concurrency); err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: error setting concurrency: %w", err)
+			return nil, errwrap.Wrap(err, "error setting concurrency")
 		}

 		return w, nil
 	case "zst":
 		compressWriter, err := zstd.NewWriter(file)
 		if err != nil {
-			return nil, fmt.Errorf("getCompressionWriter: zstd error: %w", err)
+			return nil, errwrap.Wrap(err, "zstd error")
 		}
 		return compressWriter, nil
 	default:
-		return nil, fmt.Errorf("getCompressionWriter: unsupported compression algorithm: %s", algo)
+		return nil, errwrap.Wrap(nil, fmt.Sprintf("unsupported compression algorithm: %s", algo))
 	}
 }

 func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
 	fileInfo, err := os.Lstat(path)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error getting file infor for %s: %w", path, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error getting file info for %s", path))
 	}

 	if fileInfo.Mode()&os.ModeSocket == os.ModeSocket {
@@ -134,19 +135,19 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {
 	if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink {
 		var err error
 		if link, err = os.Readlink(path); err != nil {
-			return fmt.Errorf("writeTarball: error resolving symlink %s: %w", path, err)
+			return errwrap.Wrap(err, fmt.Sprintf("error resolving symlink %s", path))
 		}
 	}

 	header, err := tar.FileInfoHeader(fileInfo, link)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error getting file info header: %w", err)
+		return errwrap.Wrap(err, "error getting file info header")
 	}
 	header.Name = strings.TrimPrefix(path, prefix)

 	err = tarWriter.WriteHeader(header)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error writing file info header: %w", err)
+		return errwrap.Wrap(err, "error writing file info header")
 	}

 	if !fileInfo.Mode().IsRegular() {
@@ -155,14 +156,26 @@ func writeTarball(path string, tarWriter *tar.Writer, prefix string) error {

 	file, err := os.Open(path)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error opening %s: %w", path, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error opening %s", path))
 	}
 	defer file.Close()

 	_, err = io.Copy(tarWriter, file)
 	if err != nil {
-		return fmt.Errorf("writeTarball: error copying %s to tar writer: %w", path, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error copying %s to tar writer", path))
 	}

 	return nil
 }
+
+type passThroughWriteCloser struct {
+	target io.WriteCloser
+}
+
+func (p *passThroughWriteCloser) Write(b []byte) (int, error) {
+	return p.target.Write(b)
+}
+
+func (p *passThroughWriteCloser) Close() error {
+	return nil
+}
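The new `passThroughWriteCloser` exists so the added `none` compression case can hand `tar.Writer` an `io.WriteCloser` whose `Close` does not close the underlying file, which `compress` closes separately. A minimal self-contained sketch of that pattern (file name and flow are illustrative, not taken from the diff):

```go
package main

import (
	"archive/tar"
	"io"
	"os"
)

// passThroughWriteCloser mirrors the type added in the diff: it forwards
// writes and makes Close a no-op, so closing the "compression" layer does
// not close the underlying *os.File.
type passThroughWriteCloser struct {
	target io.WriteCloser
}

func (p *passThroughWriteCloser) Write(b []byte) (int, error) { return p.target.Write(b) }
func (p *passThroughWriteCloser) Close() error                { return nil }

func main() {
	f, err := os.Create("example.tar") // illustrative output path
	if err != nil {
		panic(err)
	}
	var w io.WriteCloser = &passThroughWriteCloser{f}
	tw := tar.NewWriter(w)
	// ... write tar entries here ...
	tw.Close() // finalizes the tar stream
	w.Close()  // no-op for the "none" algorithm: the file stays open
	f.Close()  // the caller closes the file exactly once
}
```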
cmd/backup/command.go (new file): 152 lines

@@ -0,0 +1,152 @@
+// Copyright 2024 - offen.software <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"fmt"
+	"log/slog"
+	"os"
+	"os/signal"
+	"syscall"
+
+	"github.com/offen/docker-volume-backup/internal/errwrap"
+	"github.com/robfig/cron/v3"
+)
+
+type command struct {
+	logger    *slog.Logger
+	schedules []cron.EntryID
+	cr        *cron.Cron
+	reload    chan struct{}
+}
+
+func newCommand() *command {
+	return &command{
+		logger: slog.New(slog.NewTextHandler(os.Stdout, nil)),
+	}
+}
+
+// runAsCommand executes a backup run for each configuration that is available
+// and then returns
+func (c *command) runAsCommand() error {
+	configurations, err := sourceConfiguration(configStrategyEnv)
+	if err != nil {
+		return errwrap.Wrap(err, "error loading env vars")
+	}
+
+	for _, config := range configurations {
+		if err := runScript(config); err != nil {
+			return errwrap.Wrap(err, "error running script")
+		}
+	}
+
+	return nil
+}
+
+type foregroundOpts struct {
+	profileCronExpression string
+}
+
+// runInForeground starts the program as a long running process, scheduling
+// a job for each configuration that is available.
+func (c *command) runInForeground(opts foregroundOpts) error {
+	c.cr = cron.New(
+		cron.WithParser(
+			cron.NewParser(
+				cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
+			),
+		),
+	)
+
+	if err := c.schedule(configStrategyConfd); err != nil {
+		return errwrap.Wrap(err, "error scheduling")
+	}
+
+	if opts.profileCronExpression != "" {
+		if _, err := c.cr.AddFunc(opts.profileCronExpression, c.profile); err != nil {
+			return errwrap.Wrap(err, "error adding profiling job")
+		}
+	}
+
+	var quit = make(chan os.Signal, 1)
+	c.reload = make(chan struct{}, 1)
+	signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT)
+	c.cr.Start()
+
+	for {
+		select {
+		case <-quit:
+			ctx := c.cr.Stop()
+			<-ctx.Done()
+			return nil
+		case <-c.reload:
+			if err := c.schedule(configStrategyConfd); err != nil {
+				return errwrap.Wrap(err, "error reloading configuration")
+			}
+		}
+	}
+}
+
+// schedule wipes all existing schedules and enqueues all schedules available
+// using the given configuration strategy
+func (c *command) schedule(strategy configStrategy) error {
+	for _, id := range c.schedules {
+		c.cr.Remove(id)
+	}
+
+	configurations, err := sourceConfiguration(strategy)
+	if err != nil {
+		return errwrap.Wrap(err, "error sourcing configuration")
+	}
+
+	for _, cfg := range configurations {
+		config := cfg
+		id, err := c.cr.AddFunc(config.BackupCronExpression, func() {
+			c.logger.Info(
+				fmt.Sprintf(
+					"Now running script on schedule %s",
+					config.BackupCronExpression,
+				),
+			)
+
+			if err := runScript(config); err != nil {
+				c.logger.Error(
+					fmt.Sprintf(
+						"Unexpected error running schedule %s: %v",
+						config.BackupCronExpression,
+						errwrap.Unwrap(err),
+					),
+					"error",
+					err,
+				)
+			}
+		})
+
+		if err != nil {
+			return errwrap.Wrap(err, fmt.Sprintf("error adding schedule %s", config.BackupCronExpression))
+		}
+		c.logger.Info(fmt.Sprintf("Successfully scheduled backup %s with expression %s", config.source, config.BackupCronExpression))
+		if ok := checkCronSchedule(config.BackupCronExpression); !ok {
+			c.logger.Warn(
+				fmt.Sprintf("Scheduled cron expression %s will never run, is this intentional?", config.BackupCronExpression),
+			)
+		}
+		c.schedules = append(c.schedules, id)
+	}
+
+	return nil
+}
+
+// must exits the program when passed an error. It should be the only
+// place where the application exits forcefully.
+func (c *command) must(err error) {
+	if err != nil {
+		c.logger.Error(
+			fmt.Sprintf("Fatal error running command: %v", errwrap.Unwrap(err)),
+			"error",
+			err,
+		)
+		os.Exit(1)
+	}
+}
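For context on the parser configured in `runInForeground`: `cron.SecondOptional` makes the same parser accept both the classic five-field cron syntax and a six-field variant with a leading seconds column, plus descriptors like `@daily`. A small sketch (the expressions are illustrative):

```go
package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

func main() {
	// Same parser options as command.go above.
	p := cron.NewParser(
		cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
	)
	for _, expr := range []string{"@daily", "0 2 * * *", "30 0 2 * * *"} {
		if _, err := p.Parse(expr); err != nil {
			fmt.Printf("%s -> %v\n", expr, err)
			continue
		}
		fmt.Printf("%s -> ok\n", expr)
	}
}
```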
cmd/backup/config.go

@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -11,6 +11,8 @@ import (
 	"regexp"
 	"strconv"
 	"time"
+
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )

 // Config holds all configuration values that are expected to be set
@@ -45,6 +47,7 @@ type Config struct {
 	BackupExcludeRegexp         RegexpDecoder `split_words:"true"`
 	BackupSkipBackendsFromPrune []string      `split_words:"true"`
 	GpgPassphrase               string        `split_words:"true"`
+	GpgPublicKeyRing            string        `split_words:"true"`
 	NotificationURLs            []string      `envconfig:"NOTIFICATION_URLS"`
 	NotificationLevel           string        `split_words:"true" default:"error"`
 	EmailNotificationRecipient  string        `split_words:"true"`
@@ -70,9 +73,11 @@ type Config struct {
 	LockTimeout                   time.Duration `split_words:"true" default:"60m"`
 	AzureStorageAccountName       string        `split_words:"true"`
 	AzureStoragePrimaryAccountKey string        `split_words:"true"`
+	AzureStorageConnectionString  string        `split_words:"true"`
 	AzureStorageContainerName     string        `split_words:"true"`
 	AzureStoragePath              string        `split_words:"true"`
 	AzureStorageEndpoint          string        `split_words:"true" default:"https://{{ .AccountName }}.blob.core.windows.net/"`
+	AzureStorageAccessTier        string        `split_words:"true"`
 	DropboxEndpoint               string        `split_words:"true" default:"https://api.dropbox.com/"`
 	DropboxOAuth2Endpoint         string        `envconfig:"DROPBOX_OAUTH2_ENDPOINT" default:"https://api.dropbox.com/"`
 	DropboxRefreshToken           string        `split_words:"true"`
@@ -80,17 +85,19 @@ type Config struct {
 	DropboxAppSecret              string        `split_words:"true"`
 	DropboxRemotePath             string        `split_words:"true"`
 	DropboxConcurrencyLevel       NaturalNumber `split_words:"true" default:"6"`
+	source                        string
+	additionalEnvVars             map[string]string
 }

 type CompressionType string

 func (c *CompressionType) Decode(v string) error {
 	switch v {
-	case "gz", "zst":
+	case "none", "gz", "zst":
 		*c = CompressionType(v)
 		return nil
 	default:
-		return fmt.Errorf("config: error decoding compression type %s", v)
+		return errwrap.Wrap(nil, fmt.Sprintf("error decoding compression type %s", v))
 	}
 }

@@ -113,7 +120,7 @@ func (c *CertDecoder) Decode(v string) error {
 	block, _ := pem.Decode(content)
 	cert, err := x509.ParseCertificate(block.Bytes)
 	if err != nil {
-		return fmt.Errorf("config: error parsing certificate: %w", err)
+		return errwrap.Wrap(err, "error parsing certificate")
 	}
 	*c = CertDecoder{Cert: cert}
 	return nil
@@ -129,7 +136,7 @@ func (r *RegexpDecoder) Decode(v string) error {
 	}
 	re, err := regexp.Compile(v)
 	if err != nil {
-		return fmt.Errorf("config: error compiling given regexp `%s`: %w", v, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error compiling given regexp `%s`", v))
 	}
 	*r = RegexpDecoder{Re: re}
 	return nil
@@ -141,10 +148,10 @@ type NaturalNumber int
 func (n *NaturalNumber) Decode(v string) error {
 	asInt, err := strconv.Atoi(v)
 	if err != nil {
-		return fmt.Errorf("config: error converting %s to int", v)
+		return errwrap.Wrap(nil, fmt.Sprintf("error converting %s to int", v))
 	}
 	if asInt <= 0 {
-		return fmt.Errorf("config: expected a natural number, got %d", asInt)
+		return errwrap.Wrap(nil, fmt.Sprintf("expected a natural number, got %d", asInt))
 	}
 	*n = NaturalNumber(asInt)
 	return nil
@@ -160,10 +167,10 @@ type WholeNumber int
 func (n *WholeNumber) Decode(v string) error {
 	asInt, err := strconv.Atoi(v)
 	if err != nil {
-		return fmt.Errorf("config: error converting %s to int", v)
+		return errwrap.Wrap(nil, fmt.Sprintf("error converting %s to int", v))
 	}
 	if asInt < 0 {
-		return fmt.Errorf("config: expected a whole, positive number, including zero. Got %d", asInt)
+		return errwrap.Wrap(nil, fmt.Sprintf("expected a whole, positive number, including zero. Got %d", asInt))
 	}
 	*n = WholeNumber(asInt)
 	return nil
@@ -172,3 +179,40 @@ func (n *WholeNumber) Decode(v string) error {
 func (n *WholeNumber) Int() int {
 	return int(*n)
 }
+
+type envVarLookup struct {
+	ok    bool
+	key   string
+	value string
+}
+
+// applyEnv sets the values in `additionalEnvVars` as environment variables.
+// It returns a function that reverts all values that have been set to its
+// previous state.
+func (c *Config) applyEnv() (func() error, error) {
+	lookups := []envVarLookup{}
+
+	unset := func() error {
+		for _, lookup := range lookups {
+			if !lookup.ok {
+				if err := os.Unsetenv(lookup.key); err != nil {
+					return errwrap.Wrap(err, fmt.Sprintf("error unsetting env var %s", lookup.key))
+				}
+				continue
+			}
+			if err := os.Setenv(lookup.key, lookup.value); err != nil {
+				return errwrap.Wrap(err, fmt.Sprintf("error setting back env var %s", lookup.key))
+			}
+		}
+		return nil
+	}
+
+	for key, value := range c.additionalEnvVars {
+		current, ok := os.LookupEnv(key)
+		lookups = append(lookups, envVarLookup{ok: ok, key: key, value: current})
+		if err := os.Setenv(key, value); err != nil {
+			return unset, errwrap.Wrap(err, "error setting env var")
+		}
+	}
+	return unset, nil
+}
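The `Decode` methods on `CompressionType`, `NaturalNumber` and friends are not called directly anywhere; envconfig invokes them via reflection while processing the struct tags. A minimal sketch of that mechanism, assuming the offen/envconfig fork behaves like upstream kelseyhightower/envconfig here (the demo struct is illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/offen/envconfig"
)

// CompressionType mirrors the decoder from config.go, including the newly
// accepted "none" value.
type CompressionType string

func (c *CompressionType) Decode(v string) error {
	switch v {
	case "none", "gz", "zst":
		*c = CompressionType(v)
		return nil
	default:
		return fmt.Errorf("error decoding compression type %s", v)
	}
}

type demoConfig struct {
	BackupCompression CompressionType `split_words:"true" default:"gz"`
}

func main() {
	os.Setenv("BACKUP_COMPRESSION", "zst")
	var c demoConfig
	// Process finds BACKUP_COMPRESSION via the split_words tag and calls
	// (*CompressionType).Decode with its value.
	if err := envconfig.Process("", &c); err != nil {
		panic(err)
	}
	fmt.Println(c.BackupCompression) // zst
}
```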
cmd/backup/config_provider.go

@@ -1,20 +1,54 @@
-// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2024 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

 import (
+	"bufio"
 	"fmt"
 	"os"
 	"path/filepath"
+	"strings"

 	"github.com/joho/godotenv"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/envconfig"
+	shell "mvdan.cc/sh/v3/shell"
 )

+type configStrategy string
+
+const (
+	configStrategyEnv   configStrategy = "env"
+	configStrategyConfd configStrategy = "confd"
+)
+
+// sourceConfiguration returns a list of config objects using the given
+// strategy. It should be the single entrypoint for retrieving configuration
+// for all consumers.
+func sourceConfiguration(strategy configStrategy) ([]*Config, error) {
+	switch strategy {
+	case configStrategyEnv:
+		c, err := loadConfigFromEnvVars()
+		return []*Config{c}, err
+	case configStrategyConfd:
+		cs, err := loadConfigsFromEnvFiles("/etc/dockervolumebackup/conf.d")
+		if err != nil {
+			if os.IsNotExist(err) {
+				return sourceConfiguration(configStrategyEnv)
+			}
+			return nil, errwrap.Wrap(err, "error loading config files")
+		}
+		return cs, nil
+	default:
+		return nil, errwrap.Wrap(nil, fmt.Sprintf("received unknown config strategy: %v", strategy))
+	}
+}
+
 // envProxy is a function that mimics os.LookupEnv but can read values from any other source
 type envProxy func(string) (string, bool)

+// loadConfig creates a config object using the given lookup function
 func loadConfig(lookup envProxy) (*Config, error) {
 	envconfig.Lookup = func(key string) (string, bool) {
 		value, okValue := lookup(key)
@@ -38,45 +72,95 @@ func loadConfig(lookup envProxy) (*Config, error) {

 	var c = &Config{}
 	if err := envconfig.Process("", c); err != nil {
-		return nil, fmt.Errorf("loadConfig: failed to process configuration values: %w", err)
+		return nil, errwrap.Wrap(err, "failed to process configuration values")
 	}

 	return c, nil
 }

-func loadEnvVars() (*Config, error) {
-	return loadConfig(os.LookupEnv)
+func loadConfigFromEnvVars() (*Config, error) {
+	c, err := loadConfig(os.LookupEnv)
+	if err != nil {
+		return nil, errwrap.Wrap(err, "error loading config from environment")
+	}
+	c.source = "from environment"
+	return c, nil
 }

-func loadEnvFiles(directory string) ([]*Config, error) {
+func loadConfigsFromEnvFiles(directory string) ([]*Config, error) {
 	items, err := os.ReadDir(directory)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil, err
 		}
-		return nil, fmt.Errorf("loadEnvFiles: failed to read files from env directory: %w", err)
+		return nil, errwrap.Wrap(err, "failed to read files from env directory")
 	}

-	var cs = make([]*Config, 0)
+	configs := []*Config{}
 	for _, item := range items {
 		if item.IsDir() {
 			continue
 		}
 		p := filepath.Join(directory, item.Name())
-		envFile, err := godotenv.Read(p)
+		envFile, err := source(p)
 		if err != nil {
-			return nil, fmt.Errorf("loadEnvFiles: error reading config file %s: %w", p, err)
+			return nil, errwrap.Wrap(err, fmt.Sprintf("error reading config file %s", p))
 		}
 		lookup := func(key string) (string, bool) {
 			val, ok := envFile[key]
-			return val, ok
+			if ok {
+				return val, ok
+			}
+			return os.LookupEnv(key)
 		}
 		c, err := loadConfig(lookup)
 		if err != nil {
-			return nil, fmt.Errorf("loadEnvFiles: error loading config from file %s: %w", p, err)
+			return nil, errwrap.Wrap(err, fmt.Sprintf("error loading config from file %s", p))
 		}
-		cs = append(cs, c)
+		c.source = item.Name()
+		c.additionalEnvVars = envFile
+		configs = append(configs, c)
 	}

-	return cs, nil
+	return configs, nil
+}
+
+// source tries to mimic the pre v2.37.0 behavior of calling
+// `set +a; source $path; set -a` and returns the env vars as a map
+func source(path string) (map[string]string, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, errwrap.Wrap(err, fmt.Sprintf("error opening %s", path))
+	}
+
+	result := map[string]string{}
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		line := scanner.Text()
+		line = strings.TrimSpace(line)
+		if strings.HasPrefix(line, "#") {
+			continue
+		}
+		withExpansion, err := shell.Expand(line, nil)
+		if err != nil {
+			return nil, errwrap.Wrap(err, "error expanding env")
+		}
+		m, err := godotenv.Unmarshal(withExpansion)
+		if err != nil {
+			return nil, errwrap.Wrap(err, fmt.Sprintf("error sourcing %s", path))
+		}
+		for key, value := range m {
+			currentValue, currentOk := os.LookupEnv(key)
+			defer func() {
+				if currentOk {
+					os.Setenv(key, currentValue)
+					return
+				}
+				os.Unsetenv(key)
+			}()
+			result[key] = value
+			os.Setenv(key, value)
+		}
+	}
+	return result, nil
 }
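The new `source` function parses each line in two steps: shell-style expansion first, then dotenv parsing of the expanded result. An isolated sketch of that pipeline (the input line is illustrative); passing `nil` as the environment function to `shell.Expand` makes it fall back to the process environment, which is what lets `${VAR}` references in conf.d files resolve:

```go
package main

import (
	"fmt"
	"os"

	"github.com/joho/godotenv"
	shell "mvdan.cc/sh/v3/shell"
)

func main() {
	os.Setenv("QUX", "yyy")
	line := "FOO=${QUX}"

	// Step 1: expand shell parameter references against the environment.
	expanded, err := shell.Expand(line, nil) // nil falls back to os environment
	if err != nil {
		panic(err)
	}

	// Step 2: parse the expanded line as a dotenv assignment.
	m, err := godotenv.Unmarshal(expanded)
	if err != nil {
		panic(err)
	}
	fmt.Println(m["FOO"]) // yyy
}
```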
cmd/backup/config_provider_test.go (new file): 77 lines

@@ -0,0 +1,77 @@
+package main
+
+import (
+	"os"
+	"reflect"
+	"testing"
+)
+
+func TestSource(t *testing.T) {
+	tests := []struct {
+		name           string
+		input          string
+		expectError    bool
+		expectedOutput map[string]string
+	}{
+		{
+			"default",
+			"testdata/default.env",
+			false,
+			map[string]string{
+				"FOO": "bar",
+				"BAZ": "qux",
+			},
+		},
+		{
+			"not found",
+			"testdata/nope.env",
+			true,
+			nil,
+		},
+		{
+			"braces",
+			"testdata/braces.env",
+			false,
+			map[string]string{
+				"FOO": "qux",
+				"BAR": "xxx",
+				"BAZ": "",
+			},
+		},
+		{
+			"expansion",
+			"testdata/expansion.env",
+			false,
+			map[string]string{
+				"BAR": "xxx",
+				"FOO": "xxx",
+				"BAZ": "xxx",
+				"QUX": "yyy",
+			},
+		},
+		{
+			"comments",
+			"testdata/comments.env",
+			false,
+			map[string]string{
+				"BAR": "xxx",
+				"BAZ": "yyy",
+			},
+		},
+	}
+
+	os.Setenv("QUX", "yyy")
+	defer os.Unsetenv("QUX")
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			result, err := source(test.input)
+			if (err != nil) != test.expectError {
+				t.Errorf("Unexpected error value %v", err)
+			}
+			if !reflect.DeepEqual(test.expectedOutput, result) {
+				t.Errorf("Expected %v, got %v", test.expectedOutput, result)
+			}
+		})
+	}
+}
cmd/backup/copy_archive.go (new file): 41 lines

@@ -0,0 +1,41 @@
+// Copyright 2024 - offen.software <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"os"
+	"path"
+
+	"github.com/offen/docker-volume-backup/internal/errwrap"
+	"golang.org/x/sync/errgroup"
+)
+
+// copyArchive makes sure the backup file is copied to both local and remote locations
+// as per the given configuration.
+func (s *script) copyArchive() error {
+	_, name := path.Split(s.file)
+	if stat, err := os.Stat(s.file); err != nil {
+		return errwrap.Wrap(err, "unable to stat backup file")
+	} else {
+		size := stat.Size()
+		s.stats.BackupFile = BackupFileStats{
+			Size:     uint64(size),
+			Name:     name,
+			FullPath: s.file,
+		}
+	}
+
+	eg := errgroup.Group{}
+	for _, backend := range s.storages {
+		b := backend
+		eg.Go(func() error {
+			return b.Copy(s.file)
+		})
+	}
+	if err := eg.Wait(); err != nil {
+		return errwrap.Wrap(err, "error copying archive")
+	}
+
+	return nil
+}
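A note on the `b := backend` line in `copyArchive`: before Go 1.22, the range variable is shared across iterations, so goroutines started via `eg.Go` would otherwise all observe the last storage backend. Sketch with illustrative backend names:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	backends := []string{"s3", "webdav", "ssh"} // illustrative
	eg := errgroup.Group{}
	for _, backend := range backends {
		b := backend // capture a per-iteration copy (required before Go 1.22)
		eg.Go(func() error {
			fmt.Println("copying to", b)
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println(err)
	}
}
```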
cmd/backup/create_archive.go (new file): 88 lines

@@ -0,0 +1,88 @@
+// Copyright 2024 - offen.software <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"fmt"
+	"io/fs"
+	"path/filepath"
+
+	"github.com/offen/docker-volume-backup/internal/errwrap"
+	"github.com/otiai10/copy"
+)
+
+// createArchive creates a tar archive of the configured backup location and
+// saves it to disk.
+func (s *script) createArchive() error {
+	backupSources := s.c.BackupSources
+
+	if s.c.BackupFromSnapshot {
+		s.logger.Warn(
+			"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
+		)
+		s.logger.Warn(
+			"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the documentation for an upgrade guide.",
+		)
+		backupSources = filepath.Join("/tmp", s.c.BackupSources)
+		// copy before compressing guard against a situation where backup folder's content are still growing.
+		s.registerHook(hookLevelPlumbing, func(error) error {
+			if err := remove(backupSources); err != nil {
+				return errwrap.Wrap(err, "error removing snapshot")
+			}
+			s.logger.Info(
+				fmt.Sprintf("Removed snapshot `%s`.", backupSources),
+			)
+			return nil
+		})
+		if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
+			PreserveTimes: true,
+			PreserveOwner: true,
+		}); err != nil {
+			return errwrap.Wrap(err, "error creating snapshot")
+		}
+		s.logger.Info(
+			fmt.Sprintf("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources),
+		)
+	}
+
+	tarFile := s.file
+	s.registerHook(hookLevelPlumbing, func(error) error {
+		if err := remove(tarFile); err != nil {
+			return errwrap.Wrap(err, "error removing tar file")
+		}
+		s.logger.Info(
+			fmt.Sprintf("Removed tar file `%s`.", tarFile),
+		)
+		return nil
+	})
+
+	backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
+	if err != nil {
+		return errwrap.Wrap(err, "error getting absolute path")
+	}
+
+	var filesEligibleForBackup []string
+	if err := filepath.WalkDir(backupPath, func(path string, di fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if s.c.BackupExcludeRegexp.Re != nil && s.c.BackupExcludeRegexp.Re.MatchString(path) {
+			return nil
+		}
+		filesEligibleForBackup = append(filesEligibleForBackup, path)
+		return nil
+	}); err != nil {
+		return errwrap.Wrap(err, "error walking filesystem tree")
+	}
+
+	if err := createArchive(filesEligibleForBackup, backupSources, tarFile, s.c.BackupCompression.String(), s.c.GzipParallelism.Int()); err != nil {
+		return errwrap.Wrap(err, "error compressing backup folder")
+	}
+
+	s.logger.Info(
+		fmt.Sprintf("Created backup of `%s` at `%s`.", backupSources, tarFile),
+	)
+	return nil
+}
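The eligibility walk in `createArchive`, in isolation: every path matching `BACKUP_EXCLUDE_REGEXP` is skipped, everything else is collected for the archive. The directory and pattern below are illustrative; note that a matching directory is skipped as an entry but its children are still visited:

```go
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"regexp"
)

func main() {
	exclude := regexp.MustCompile(`\.log$`) // illustrative pattern
	var eligible []string
	err := filepath.WalkDir("/backup", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if exclude.MatchString(path) {
			return nil // skip excluded entries, do not add them to the list
		}
		eligible = append(eligible, path)
		return nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(eligible)
}
```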
cmd/backup/encrypt_archive.go (new file): 129 lines

@@ -0,0 +1,129 @@
+// Copyright 2024 - offen.software <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+
+	"github.com/ProtonMail/go-crypto/openpgp/armor"
+	openpgp "github.com/ProtonMail/go-crypto/openpgp/v2"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
+)
+
+func (s *script) encryptAsymmetrically(outFile *os.File) (io.WriteCloser, func() error, error) {
+	entityList, err := openpgp.ReadArmoredKeyRing(bytes.NewReader([]byte(s.c.GpgPublicKeyRing)))
+	if err != nil {
+		return nil, nil, errwrap.Wrap(err, "error parsing armored keyring")
+	}
+
+	armoredWriter, err := armor.Encode(outFile, "PGP MESSAGE", nil)
+	if err != nil {
+		return nil, nil, errwrap.Wrap(err, "error preparing encryption")
+	}
+
+	_, name := path.Split(s.file)
+	dst, err := openpgp.Encrypt(armoredWriter, entityList, nil, nil, &openpgp.FileHints{
+		FileName: name,
+	}, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return dst, func() error {
+		if err := dst.Close(); err != nil {
+			return err
+		}
+		return armoredWriter.Close()
+	}, err
+}
+
+func (s *script) encryptSymmetrically(outFile *os.File) (io.WriteCloser, func() error, error) {
+	_, name := path.Split(s.file)
+	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
+		FileName: name,
+	}, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return dst, dst.Close, nil
+}
+
+// encryptArchive encrypts the backup file using PGP and the configured passphrase or publickey(s).
+// In case no passphrase or publickey is given it returns early, leaving the backup file
+// untouched.
+func (s *script) encryptArchive() error {
+	var encrypt func(outFile *os.File) (io.WriteCloser, func() error, error)
+	var cleanUpErr error
+
+	switch {
+	case s.c.GpgPassphrase != "" && s.c.GpgPublicKeyRing != "":
+		return errwrap.Wrap(nil, "error in selecting asymmetric and symmetric encryption methods: conflicting env vars are set")
+	case s.c.GpgPassphrase != "":
+		encrypt = s.encryptSymmetrically
+	case s.c.GpgPublicKeyRing != "":
+		encrypt = s.encryptAsymmetrically
+	default:
+		return nil
+	}
+
+	gpgFile := fmt.Sprintf("%s.gpg", s.file)
+	s.registerHook(hookLevelPlumbing, func(error) error {
+		if err := remove(gpgFile); err != nil {
+			return errwrap.Wrap(err, "error removing gpg file")
+		}
+		s.logger.Info(
+			fmt.Sprintf("Removed GPG file `%s`.", gpgFile),
+		)
+		return nil
+	})
+
+	outFile, err := os.Create(gpgFile)
+	if err != nil {
+		return errwrap.Wrap(err, "error opening out file")
+	}
+	defer func() {
+		if err := outFile.Close(); err != nil {
+			cleanUpErr = errors.Join(cleanUpErr, errwrap.Wrap(err, "error closing out file"))
+		}
+	}()
+
+	dst, dstCloseCallback, err := encrypt(outFile)
+	if err != nil {
+		return errwrap.Wrap(err, "error encrypting backup file")
+	}
+	defer func() {
+		if err := dstCloseCallback(); err != nil {
+			cleanUpErr = errors.Join(cleanUpErr, errwrap.Wrap(err, "error closing encrypted backup file"))
+		}
+	}()
+
+	src, err := os.Open(s.file)
+	if err != nil {
+		return errwrap.Wrap(err, fmt.Sprintf("error opening backup file `%s`", s.file))
+	}
+	defer func() {
+		if err := src.Close(); err != nil {
+			cleanUpErr = errors.Join(cleanUpErr, errwrap.Wrap(err, "error closing backup file"))
+		}
+	}()
+
+	if _, err := io.Copy(dst, src); err != nil {
+		return errwrap.Wrap(err, "error writing ciphertext to file")
+	}
+
+	s.file = gpgFile
+	s.logger.Info(
+		fmt.Sprintf("Encrypted backup using gpg, saving as `%s`.", s.file),
+	)
+	return cleanUpErr
+}
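For the symmetric path, the resulting `.gpg` file can be decrypted with any OpenPGP implementation using the same passphrase. A hypothetical sketch using the classic `golang.org/x/crypto/openpgp` package rather than the ProtonMail v2 package the diff uses, with illustrative file names:

```go
package main

import (
	"io"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	f, err := os.Open("backup.tar.gz.gpg") // illustrative input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// For symmetric messages the prompt callback supplies the passphrase.
	prompt := func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
		return []byte(os.Getenv("GPG_PASSPHRASE")), nil
	}
	md, err := openpgp.ReadMessage(f, nil, prompt, nil)
	if err != nil {
		panic(err)
	}

	out, err := os.Create("backup.tar.gz") // illustrative output
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, md.UnverifiedBody); err != nil {
		panic(err)
	}
}
```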
@@ -1,4 +1,4 @@
|
|||||||
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
|
// Copyright 2022 - offen.software <hioffen@posteo.de>
|
||||||
// SPDX-License-Identifier: MPL-2.0
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
// Portions of this file are taken and adapted from `moby`, Copyright 2012-2017 Docker, Inc.
|
// Portions of this file are taken and adapted from `moby`, Copyright 2012-2017 Docker, Inc.
|
||||||
@@ -9,15 +9,17 @@ package main
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/cosiner/argv"
|
"github.com/cosiner/argv"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types/container"
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/docker/pkg/stdcopy"
|
"github.com/docker/docker/pkg/stdcopy"
|
||||||
|
"github.com/offen/docker-volume-backup/internal/errwrap"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -26,7 +28,7 @@ func (s *script) exec(containerRef string, command string, user string) ([]byte,
|
|||||||
commandEnv := []string{
|
commandEnv := []string{
|
||||||
fmt.Sprintf("COMMAND_RUNTIME_ARCHIVE_FILEPATH=%s", s.file),
|
fmt.Sprintf("COMMAND_RUNTIME_ARCHIVE_FILEPATH=%s", s.file),
|
||||||
}
|
}
|
||||||
execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, types.ExecConfig{
|
execID, err := s.cli.ContainerExecCreate(context.Background(), containerRef, container.ExecOptions{
|
||||||
Cmd: args[0],
|
Cmd: args[0],
|
||||||
AttachStdin: true,
|
AttachStdin: true,
|
||||||
AttachStderr: true,
|
AttachStderr: true,
|
||||||
@@ -34,43 +36,51 @@ func (s *script) exec(containerRef string, command string, user string) ([]byte,
|
|||||||
User: user,
|
User: user,
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("exec: error creating container exec: %w", err)
|
return nil, nil, errwrap.Wrap(err, "error creating container exec")
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, types.ExecStartCheck{})
|
resp, err := s.cli.ContainerExecAttach(context.Background(), execID.ID, container.ExecStartOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("exec: error attaching container exec: %w", err)
|
return nil, nil, errwrap.Wrap(err, "error attaching container exec")
|
||||||
}
|
}
|
||||||
defer resp.Close()
|
defer resp.Close()
|
||||||
|
|
||||||
var outBuf, errBuf bytes.Buffer
|
var outBuf, errBuf, fullRespBuf bytes.Buffer
|
||||||
outputDone := make(chan error)
|
outputDone := make(chan error)
|
||||||
|
|
||||||
|
tee := io.TeeReader(resp.Reader, &fullRespBuf)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
_, err := stdcopy.StdCopy(&outBuf, &errBuf, resp.Reader)
|
_, err := stdcopy.StdCopy(&outBuf, &errBuf, tee)
|
||||||
outputDone <- err
|
outputDone <- err
|
||||||
}()
|
}()
|
||||||
|
|
||||||
if err := <-outputDone; err != nil {
|
if err := <-outputDone; err != nil {
|
||||||
return nil, nil, fmt.Errorf("exec: error demultiplexing output: %w", err)
|
if body, bErr := io.ReadAll(&fullRespBuf); bErr == nil {
|
||||||
|
// if possible, try to append the exec output to the error
|
||||||
|
// as it's likely to be more relevant for users than the error from
|
||||||
|
// calling stdcopy.Copy
|
||||||
|
err = errwrap.Wrap(errors.New(string(body)), err.Error())
|
||||||
|
}
|
||||||
|
return nil, nil, errwrap.Wrap(err, "error demultiplexing output")
|
||||||
}
|
}
|
||||||
|
|
||||||
stdout, err := io.ReadAll(&outBuf)
|
stdout, err := io.ReadAll(&outBuf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("exec: error reading stdout: %w", err)
|
return nil, nil, errwrap.Wrap(err, "error reading stdout")
|
||||||
}
|
}
|
||||||
stderr, err := io.ReadAll(&errBuf)
|
stderr, err := io.ReadAll(&errBuf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("exec: error reading stderr: %w", err)
|
return nil, nil, errwrap.Wrap(err, "error reading stderr")
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := s.cli.ContainerExecInspect(context.Background(), execID.ID)
|
res, err := s.cli.ContainerExecInspect(context.Background(), execID.ID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, fmt.Errorf("exec: error inspecting container exec: %w", err)
|
return nil, nil, errwrap.Wrap(err, "error inspecting container exec")
|
||||||
}
|
}
|
||||||
|
|
||||||
if res.ExitCode > 0 {
|
if res.ExitCode > 0 {
|
||||||
return stdout, stderr, fmt.Errorf("exec: running command exited %d", res.ExitCode)
|
return stdout, stderr, errwrap.Wrap(nil, fmt.Sprintf("running command exited %d", res.ExitCode))
|
||||||
}
|
}
|
||||||
|
|
||||||
return stdout, stderr, nil
|
return stdout, stderr, nil
|
@@ -86,11 +96,11 @@ func (s *script) runLabeledCommands(label string) error {
 			Value: fmt.Sprintf("docker-volume-backup.exec-label=%s", s.c.ExecLabel),
 		})
 	}
-	containersWithCommand, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+	containersWithCommand, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 		Filters: filters.NewArgs(f...),
 	})
 	if err != nil {
-		return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
+		return errwrap.Wrap(err, "error querying for containers")
 	}

 	var hasDeprecatedContainers bool
@@ -99,11 +109,11 @@ func (s *script) runLabeledCommands(label string) error {
 			Key:   "label",
 			Value: "docker-volume-backup.exec-pre",
 		}
-		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+		deprecatedContainers, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 			Filters: filters.NewArgs(f...),
 		})
 		if err != nil {
-			return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
+			return errwrap.Wrap(err, "error querying for containers")
 		}
 		if len(deprecatedContainers) != 0 {
 			hasDeprecatedContainers = true
@@ -116,11 +126,11 @@ func (s *script) runLabeledCommands(label string) error {
 			Key:   "label",
 			Value: "docker-volume-backup.exec-post",
 		}
-		deprecatedContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+		deprecatedContainers, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 			Filters: filters.NewArgs(f...),
 		})
 		if err != nil {
-			return fmt.Errorf("runLabeledCommands: error querying for containers: %w", err)
+			return errwrap.Wrap(err, "error querying for containers")
 		}
 		if len(deprecatedContainers) != 0 {
 			hasDeprecatedContainers = true
@@ -163,14 +173,14 @@ func (s *script) runLabeledCommands(label string) error {
 				os.Stdout.Write(stdout)
 			}
 			if err != nil {
-				return fmt.Errorf("runLabeledCommands: error executing command: %w", err)
+				return errwrap.Wrap(err, "error executing command")
 			}
 			return nil
 		})
 	}

 	if err := g.Wait(); err != nil {
-		return fmt.Errorf("runLabeledCommands: error from errgroup: %w", err)
+		return errwrap.Wrap(err, "error from errgroup")
 	}
 	return nil
 }
@@ -190,13 +200,12 @@ func (s *script) withLabeledCommands(step lifecyclePhase, cb func() error) func(
 	}
 	return func() (err error) {
 		if err = s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-pre", step)); err != nil {
-			err = fmt.Errorf("withLabeledCommands: %s: error running pre commands: %w", step, err)
+			err = errwrap.Wrap(err, fmt.Sprintf("error running %s-pre commands", step))
 			return
 		}
 		defer func() {
-			derr := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step))
-			if err == nil && derr != nil {
-				err = derr
+			if derr := s.runLabeledCommands(fmt.Sprintf("docker-volume-backup.%s-post", step)); derr != nil {
+				err = errors.Join(err, errwrap.Wrap(derr, fmt.Sprintf("error running %s-post commands", step)))
 			}
 		}()
 		err = cb()
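The rewritten defer above stops discarding the post-command error whenever the callback had already failed: errors.Join now keeps both. A runnable sketch of that deferred-join pattern (function names here are illustrative):

package main

import (
	"errors"
	"fmt"
)

// postCommand stands in for the labeled post command; it can fail
// independently of the main callback.
func postCommand() error { return errors.New("post command failed") }

func run() (err error) {
	defer func() {
		// errors.Join preserves both the callback error and the deferred
		// one; the old code dropped the post-command error whenever the
		// callback had already failed.
		if derr := postCommand(); derr != nil {
			err = errors.Join(err, derr)
		}
	}()
	return errors.New("callback failed")
}

func main() {
	fmt.Println(run()) // both messages are printed
}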
@@ -1,12 +1,13 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

 import (
 	"errors"
-	"fmt"
 	"sort"

+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )

 // hook contains a queued action that can be trigger them when the script
@@ -47,7 +48,7 @@ func (s *script) runHooks(err error) error {
 			continue
 		}
 		if actionErr := hook.action(err); actionErr != nil {
-			actionErrors = append(actionErrors, fmt.Errorf("runHooks: error running hook: %w", actionErr))
+			actionErrors = append(actionErrors, errwrap.Wrap(actionErr, "error running hook"))
 		}
 	}
 	if len(actionErrors) != 0 {
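Nearly every hunk in this compare replaces fmt.Errorf wrapping with the new internal errwrap package. Its source is not part of this diff, so the following is only a plausible sketch of its contract as inferred from the call sites; the signature and the exact output format are assumptions, not the repository's actual implementation:

package main

import (
	"errors"
	"fmt"
)

// Wrap sketches what internal/errwrap might provide: prefix err with a
// message, or create a fresh error when err is nil (matching call sites
// like errwrap.Wrap(nil, "...")). Assumption only; the real package is
// not shown in this diff.
func Wrap(err error, message string) error {
	if err == nil {
		return errors.New(message)
	}
	return fmt.Errorf("%s: %w", message, err)
}

func main() {
	base := errors.New("connection refused")
	fmt.Println(Wrap(base, "error querying for containers"))
	fmt.Println(Wrap(nil, "timed out waiting for lockfile to become available"))
}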
@@ -1,14 +1,14 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

 import (
-	"errors"
 	"fmt"
 	"time"

 	"github.com/gofrs/flock"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )

 // lock opens a lockfile at the given location, keeping it locked until the
@@ -31,7 +31,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
 	for {
 		acquired, err := fileLock.TryLock()
 		if err != nil {
-			return noop, fmt.Errorf("lock: error trying to lock: %w", err)
+			return noop, errwrap.Wrap(err, "error trying to lock")
 		}
 		if acquired {
 			if s.encounteredLock {
@@ -54,7 +54,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
 		case <-retry.C:
 			continue
 		case <-deadline.C:
-			return noop, errors.New("lock: timed out waiting for lockfile to become available")
+			return noop, errwrap.Wrap(nil, "timed out waiting for lockfile to become available")
 		}
 	}
 }
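The lock function above polls flock.TryLock on a retry timer until a deadline fires. A compact, self-contained sketch of the same structure using gofrs/flock; the path, tick interval, and timeout are illustrative choices, not values from the repository:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/gofrs/flock"
)

// acquire polls TryLock until it succeeds or the deadline fires, mirroring
// the retry/deadline structure of the hunk above.
func acquire(path string, timeout time.Duration) (func() error, error) {
	fileLock := flock.New(path)
	retry := time.NewTicker(time.Second)
	defer retry.Stop()
	deadline := time.NewTimer(timeout)
	defer deadline.Stop()

	for {
		acquired, err := fileLock.TryLock()
		if err != nil {
			return nil, err
		}
		if acquired {
			// The caller releases the lock via the returned function.
			return fileLock.Unlock, nil
		}
		select {
		case <-retry.C:
			continue
		case <-deadline.C:
			return nil, errors.New("timed out waiting for lockfile to become available")
		}
	}
}

func main() {
	unlock, err := acquire("/tmp/example.lock", 5*time.Second)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer unlock()
	fmt.Println("lock acquired")
}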
@@ -1,220 +1,23 @@
-// Copyright 2021-2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2021-2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main

 import (
 	"flag"
-	"fmt"
-	"log/slog"
-	"os"
-	"os/signal"
-	"syscall"
-
-	"github.com/robfig/cron/v3"
 )

-type command struct {
-	logger *slog.Logger
-}
-
-func newCommand() *command {
-	return &command{
-		logger: slog.New(slog.NewTextHandler(os.Stdout, nil)),
-	}
-}
-
-func (c *command) must(err error) {
-	if err != nil {
-		c.logger.Error(
-			fmt.Sprintf("Fatal error running command: %v", err),
-			"error",
-			err,
-		)
-		os.Exit(1)
-	}
-}
-
-func runScript(c *Config) (err error) {
-	defer func() {
-		if derr := recover(); derr != nil {
-			err = fmt.Errorf("runScript: unexpected panic running script: %v", err)
-		}
-	}()
-
-	s, err := newScript(c)
-	if err != nil {
-		err = fmt.Errorf("runScript: error instantiating script: %w", err)
-		return
-	}
-
-	runErr := func() (err error) {
-		unlock, err := s.lock("/var/lock/dockervolumebackup.lock")
-		if err != nil {
-			err = fmt.Errorf("runScript: error acquiring file lock: %w", err)
-			return
-		}
-
-		defer func() {
-			derr := unlock()
-			if err == nil && derr != nil {
-				err = fmt.Errorf("runScript: error releasing file lock: %w", derr)
-			}
-		}()
-
-		scriptErr := func() error {
-			if err := s.withLabeledCommands(lifecyclePhaseArchive, func() (err error) {
-				restartContainersAndServices, err := s.stopContainersAndServices()
-				// The mechanism for restarting containers is not using hooks as it
-				// should happen as soon as possible (i.e. before uploading backups or
-				// similar).
-				defer func() {
-					derr := restartContainersAndServices()
-					if err == nil {
-						err = derr
-					}
-				}()
-				if err != nil {
-					return
-				}
-				err = s.createArchive()
-				return
-			})(); err != nil {
-				return err
-			}
-
-			if err := s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)(); err != nil {
-				return err
-			}
-			if err := s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)(); err != nil {
-				return err
-			}
-			if err := s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)(); err != nil {
-				return err
-			}
-			return nil
-		}()
-
-		if hookErr := s.runHooks(scriptErr); hookErr != nil {
-			if scriptErr != nil {
-				return fmt.Errorf(
-					"runScript: error %w executing the script followed by %w calling the registered hooks",
-					scriptErr,
-					hookErr,
-				)
-			}
-			return fmt.Errorf(
-				"runScript: the script ran successfully, but an error occurred calling the registered hooks: %w",
-				hookErr,
-			)
-		}
-		if scriptErr != nil {
-			return fmt.Errorf("runScript: error running script: %w", scriptErr)
-		}
-		return nil
-	}()
-
-	if runErr != nil {
-		s.logger.Error(
-			fmt.Sprintf("Script run failed: %v", runErr), "error", runErr,
-		)
-	}
-	return runErr
-}
-
-func (c *command) runInForeground() error {
-	cr := cron.New(
-		cron.WithParser(
-			cron.NewParser(
-				cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
-			),
-		),
-	)
-
-	addJob := func(config *Config) error {
-		if _, err := cr.AddFunc(config.BackupCronExpression, func() {
-			c.logger.Info(
-				fmt.Sprintf(
-					"Now running schedule %s",
-					config.BackupCronExpression,
-				),
-			)
-			if err := runScript(config); err != nil {
-				c.logger.Error(
-					fmt.Sprintf(
-						"Unexpected error running schedule %s: %v",
-						config.BackupCronExpression,
-						err,
-					),
-					"error",
-					err,
-				)
-			}
-		}); err != nil {
-			return fmt.Errorf("addJob: error adding schedule %s: %w", config.BackupCronExpression, err)
-		}
-		c.logger.Info(fmt.Sprintf("Successfully scheduled backup with expression %s", config.BackupCronExpression))
-		return nil
-	}
-
-	cs, err := loadEnvFiles("/etc/dockervolumebackup/conf.d")
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return fmt.Errorf("runInForeground: could not load config from environment files: %w", err)
-		}
-
-		c, err := loadEnvVars()
-		if err != nil {
-			return fmt.Errorf("runInForeground: could not load config from environment variables: %w", err)
-		} else {
-			err = addJob(c)
-			if err != nil {
-				return fmt.Errorf("runInForeground: error adding job from env: %w", err)
-			}
-		}
-	} else {
-		c.logger.Info("/etc/dockervolumebackup/conf.d was found, using configuration files from this directory.")
-		for _, config := range cs {
-			err = addJob(config)
-			if err != nil {
-				return fmt.Errorf("runInForeground: error adding jobs from conf files: %w", err)
-			}
-			c.logger.Info(
-				fmt.Sprintf("Successfully scheduled backup with expression %s", config.BackupCronExpression),
-			)
-		}
-	}
-
-	var quit = make(chan os.Signal, 1)
-	signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT)
-	cr.Start()
-	<-quit
-	ctx := cr.Stop()
-	<-ctx.Done()
-
-	return nil
-}
-
-func (c *command) runAsCommand() error {
-	config, err := loadEnvVars()
-	if err != nil {
-		return fmt.Errorf("runAsCommand: error loading env vars: %w", err)
-	}
-	err = runScript(config)
-	if err != nil {
-		return fmt.Errorf("runAsCommand: error running script: %w", err)
-	}
-
-	return nil
-}

 func main() {
 	foreground := flag.Bool("foreground", false, "run the tool in the foreground")
+	profile := flag.String("profile", "", "collect runtime metrics and log them periodically on the given cron expression")
 	flag.Parse()

 	c := newCommand()
 	if *foreground {
-		c.must(c.runInForeground())
+		opts := foregroundOpts{
+			profileCronExpression: *profile,
+		}
+		c.must(c.runInForeground(opts))
 	} else {
 		c.must(c.runAsCommand())
 	}
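The scheduler deleted here (and reintroduced elsewhere as runInForeground with foregroundOpts) builds a robfig/cron instance whose parser accepts an optional seconds field and descriptors, then blocks until SIGTERM or SIGINT. A minimal standalone sketch of that lifecycle; the schedule expression is illustrative:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"github.com/robfig/cron/v3"
)

func main() {
	// Same parser options as the scheduler above: a seconds field is
	// optional, and descriptors like @daily are accepted.
	cr := cron.New(cron.WithParser(cron.NewParser(
		cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
	)))
	if _, err := cr.AddFunc("@every 10s", func() { fmt.Println("tick") }); err != nil {
		fmt.Println("error adding schedule:", err)
		os.Exit(1)
	}

	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGTERM, syscall.SIGINT)
	cr.Start()
	<-quit
	// Stop returns a context that is done once running jobs have finished.
	<-cr.Stop().Done()
}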
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -14,6 +14,7 @@ import (
 	"time"

 	sTypes "github.com/containrrr/shoutrrr/pkg/types"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )

 //go:embed notifications.tmpl
@@ -37,16 +38,16 @@ func (s *script) notify(titleTemplate string, bodyTemplate string, err error) er

 	titleBuf := &bytes.Buffer{}
 	if err := s.template.ExecuteTemplate(titleBuf, titleTemplate, params); err != nil {
-		return fmt.Errorf("notify: error executing %s template: %w", titleTemplate, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error executing %s template", titleTemplate))
 	}

 	bodyBuf := &bytes.Buffer{}
 	if err := s.template.ExecuteTemplate(bodyBuf, bodyTemplate, params); err != nil {
-		return fmt.Errorf("notify: error executing %s template: %w", bodyTemplate, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error executing %s template", bodyTemplate))
 	}

 	if err := s.sendNotification(titleBuf.String(), bodyBuf.String()); err != nil {
-		return fmt.Errorf("notify: error notifying: %w", err)
+		return errwrap.Wrap(err, "error sending notification")
 	}
 	return nil
 }
@@ -70,7 +71,7 @@ func (s *script) sendNotification(title, body string) error {
 		}
 	}
 	if len(errs) != 0 {
-		return fmt.Errorf("sendNotification: error sending message: %w", errors.Join(errs...))
+		return errwrap.Wrap(errors.Join(errs...), "error sending message")
 	}
 	return nil
 }
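sendNotification fans a message out through a shoutrrr sender, collecting one error slot per configured notification URL. A small sketch of that API; the logger:// URL is (to my understanding) shoutrrr's built-in logging service and stands in here for a real notification target:

package main

import (
	"fmt"

	"github.com/containrrr/shoutrrr"
)

func main() {
	// CreateSender accepts one or more service URLs.
	sender, err := shoutrrr.CreateSender("logger://")
	if err != nil {
		fmt.Println("error creating sender:", err)
		return
	}
	// Send returns a slice with one error slot per configured URL.
	for _, err := range sender.Send("backup finished", nil) {
		if err != nil {
			fmt.Println("error sending message:", err)
		}
	}
}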
cmd/backup/profile.go (new file, 24 lines)
@@ -0,0 +1,24 @@
+// Copyright 2024 - offen.software <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import "runtime"
+
+func (c *command) profile() {
+	memStats := runtime.MemStats{}
+	runtime.ReadMemStats(&memStats)
+	c.logger.Info(
+		"Collecting runtime information",
+		"num_goroutines",
+		runtime.NumGoroutine(),
+		"memory_heap_alloc",
+		formatBytes(memStats.HeapAlloc, false),
+		"memory_heap_inuse",
+		formatBytes(memStats.HeapInuse, false),
+		"memory_heap_sys",
+		formatBytes(memStats.HeapSys, false),
+		"memory_heap_objects",
+		memStats.HeapObjects,
+	)
+}
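The new profile method samples runtime.MemStats and the goroutine count. formatBytes is project-internal and not shown in this diff, so this standalone sketch prints raw byte counts instead:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	var memStats runtime.MemStats
	// ReadMemStats fills in current heap statistics from the runtime.
	runtime.ReadMemStats(&memStats)
	fmt.Printf("goroutines=%d heap_alloc=%d heap_inuse=%d heap_sys=%d heap_objects=%d\n",
		runtime.NumGoroutine(),
		memStats.HeapAlloc,
		memStats.HeapInuse,
		memStats.HeapSys,
		memStats.HeapObjects,
	)
}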
cmd/backup/prune_backups.go (new file, 66 lines)
@@ -0,0 +1,66 @@
+// Copyright 2024 - offen.software <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"fmt"
+	"slices"
+	"strings"
+	"time"
+
+	"github.com/offen/docker-volume-backup/internal/errwrap"
+	"golang.org/x/sync/errgroup"
+)
+
+// pruneBackups rotates away backups from local and remote storages using
+// the given configuration. In case the given configuration would delete all
+// backups, it does nothing instead and logs a warning.
+func (s *script) pruneBackups() error {
+	if s.c.BackupRetentionDays < 0 {
+		return nil
+	}
+
+	deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)
+
+	eg := errgroup.Group{}
+	for _, backend := range s.storages {
+		b := backend
+		eg.Go(func() error {
+			if skipPrune(b.Name(), s.c.BackupSkipBackendsFromPrune) {
+				s.logger.Info(
+					fmt.Sprintf("Skipping pruning for backend `%s`.", b.Name()),
+				)
+				return nil
+			}
+			stats, err := b.Prune(deadline, s.c.BackupPruningPrefix)
+			if err != nil {
+				return err
+			}
+			s.stats.Lock()
+			s.stats.Storages[b.Name()] = StorageStats{
+				Total:  stats.Total,
+				Pruned: stats.Pruned,
+			}
+			s.stats.Unlock()
+			return nil
+		})
+	}
+
+	if err := eg.Wait(); err != nil {
+		return errwrap.Wrap(err, "error pruning backups")
+	}
+
+	return nil
+}
+
+// skipPrune returns true if the given backend name is contained in the
+// list of skipped backends.
+func skipPrune(name string, skippedBackends []string) bool {
+	return slices.ContainsFunc(
+		skippedBackends,
+		func(b string) bool {
+			return strings.EqualFold(b, name) // ignore case on both sides
+		},
+	)
+}
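skipPrune above combines slices.ContainsFunc with strings.EqualFold for a case-insensitive membership test. A tiny runnable illustration of the same helper outside the script type (backend names are made up):

package main

import (
	"fmt"
	"slices"
	"strings"
)

// skip reports whether name appears in the skip list, comparing
// case-insensitively, mirroring skipPrune above.
func skip(name string, skipped []string) bool {
	return slices.ContainsFunc(skipped, func(b string) bool {
		return strings.EqualFold(b, name)
	})
}

func main() {
	fmt.Println(skip("S3", []string{"s3", "webdav"})) // true
	fmt.Println(skip("azure", []string{"s3"}))        // false
}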
cmd/backup/run_script.go (new file, 113 lines)
@@ -0,0 +1,113 @@
+// Copyright 2024 - offen.software <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"errors"
+	"fmt"
+	"runtime/debug"
+
+	"github.com/offen/docker-volume-backup/internal/errwrap"
+)
+
+// runScript instantiates a new script object and orchestrates a backup run.
+// To ensure it runs mutually exclusive a global file lock is acquired before
+// it starts running. Any panic within the script will be recovered and returned
+// as an error.
+func runScript(c *Config) (err error) {
+	defer func() {
+		if derr := recover(); derr != nil {
+			fmt.Printf("%s: %s\n", derr, debug.Stack())
+			asErr, ok := derr.(error)
+			if ok {
+				err = errwrap.Wrap(asErr, "unexpected panic running script")
+			} else {
+				err = errwrap.Wrap(nil, fmt.Sprintf("%v", derr))
+			}
+		}
+	}()
+
+	s := newScript(c)
+
+	unlock, lockErr := s.lock("/var/lock/dockervolumebackup.lock")
+	if lockErr != nil {
+		err = errwrap.Wrap(lockErr, "error acquiring file lock")
+		return
+	}
+	defer func() {
+		if derr := unlock(); derr != nil {
+			err = errors.Join(err, errwrap.Wrap(derr, "error releasing file lock"))
+		}
+	}()
+
+	unset, err := s.c.applyEnv()
+	if err != nil {
+		return errwrap.Wrap(err, "error applying env")
+	}
+	defer func() {
+		if derr := unset(); derr != nil {
+			err = errors.Join(err, errwrap.Wrap(derr, "error unsetting environment variables"))
+		}
+	}()
+
+	if initErr := s.init(); initErr != nil {
+		err = errwrap.Wrap(initErr, "error instantiating script")
+		return
+	}
+
+	return func() (err error) {
+		scriptErr := func() error {
+			if err := s.withLabeledCommands(lifecyclePhaseArchive, func() (err error) {
+				restartContainersAndServices, err := s.stopContainersAndServices()
+				// The mechanism for restarting containers is not using hooks as it
+				// should happen as soon as possible (i.e. before uploading backups or
+				// similar).
+				defer func() {
+					if derr := restartContainersAndServices(); derr != nil {
+						err = errors.Join(err, errwrap.Wrap(derr, "error restarting containers and services"))
+					}
+				}()
+				if err != nil {
+					return
+				}
+				err = s.createArchive()
+				return
+			})(); err != nil {
+				return err
+			}
+
+			if err := s.withLabeledCommands(lifecyclePhaseProcess, s.encryptArchive)(); err != nil {
+				return err
+			}
+			if err := s.withLabeledCommands(lifecyclePhaseCopy, s.copyArchive)(); err != nil {
+				return err
+			}
+			if err := s.withLabeledCommands(lifecyclePhasePrune, s.pruneBackups)(); err != nil {
+				return err
+			}
+			return nil
+		}()
+
+		if hookErr := s.runHooks(scriptErr); hookErr != nil {
+			if scriptErr != nil {
+				return errwrap.Wrap(
+					nil,
+					fmt.Sprintf(
+						"error %v executing the script followed by %v calling the registered hooks",
+						scriptErr,
+						hookErr,
+					),
+				)
+			}
+			return errwrap.Wrap(
+				hookErr,
+				"the script ran successfully, but an error occurred calling the registered hooks",
+			)
+		}
+		if scriptErr != nil {
+			return errwrap.Wrap(scriptErr, "error running script")
+		}
+		return nil
+	}()
+}
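runScript converts panics into ordinary errors by recovering in a deferred function and distinguishing error panics from arbitrary panic values. A self-contained sketch of that recovery shape using only the standard library (fmt.Errorf stands in for errwrap here):

package main

import (
	"fmt"
	"runtime/debug"
)

func run() (err error) {
	defer func() {
		if derr := recover(); derr != nil {
			// Log the stack, then surface the panic as a regular error.
			fmt.Printf("%s: %s\n", derr, debug.Stack())
			if asErr, ok := derr.(error); ok {
				err = fmt.Errorf("unexpected panic running script: %w", asErr)
			} else {
				err = fmt.Errorf("unexpected panic running script: %v", derr)
			}
		}
	}()
	panic("boom")
}

func main() {
	fmt.Println(run())
}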
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -6,17 +6,13 @@ package main
 import (
 	"bytes"
 	"fmt"
-	"io"
-	"io/fs"
 	"log/slog"
 	"os"
 	"path"
-	"path/filepath"
-	"slices"
-	"strings"
 	"text/template"
 	"time"

+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
 	"github.com/offen/docker-volume-backup/internal/storage/azure"
 	"github.com/offen/docker-volume-backup/internal/storage/dropbox"
@@ -25,13 +21,10 @@ import (
 	"github.com/offen/docker-volume-backup/internal/storage/ssh"
 	"github.com/offen/docker-volume-backup/internal/storage/webdav"

-	openpgp "github.com/ProtonMail/go-crypto/openpgp/v2"
 	"github.com/containrrr/shoutrrr"
 	"github.com/containrrr/shoutrrr/pkg/router"
 	"github.com/docker/docker/client"
 	"github.com/leekchan/timeutil"
-	"github.com/otiai10/copy"
-	"golang.org/x/sync/errgroup"
 )

 // script holds all the stateful information required to orchestrate a
@@ -57,9 +50,9 @@ type script struct {
 // remote resources like the Docker engine or remote storage locations. All
 // reading from env vars or other configuration sources is expected to happen
 // in this method.
-func newScript(c *Config) (*script, error) {
+func newScript(c *Config) *script {
 	stdOut, logBuffer := buffer(os.Stdout)
-	s := &script{
+	return &script{
 		c:      c,
 		logger: slog.New(slog.NewTextHandler(stdOut, nil)),
 		stats: &Stats{
@@ -75,7 +68,9 @@ func newScript(c *Config) (*script, error) {
 		},
 		},
 	}
+}
+
+func (s *script) init() error {
 	s.registerHook(hookLevelPlumbing, func(error) error {
 		s.stats.EndTime = time.Now()
 		s.stats.TookTime = s.stats.EndTime.Sub(s.stats.StartTime)
@@ -86,14 +81,19 @@ func newScript(c *Config) (*script, error) {

 	tmplFileName, tErr := template.New("extension").Parse(s.file)
 	if tErr != nil {
-		return nil, fmt.Errorf("newScript: unable to parse backup file extension template: %w", tErr)
+		return errwrap.Wrap(tErr, "unable to parse backup file extension template")
 	}

 	var bf bytes.Buffer
 	if tErr := tmplFileName.Execute(&bf, map[string]string{
-		"Extension": fmt.Sprintf("tar.%s", s.c.BackupCompression),
+		"Extension": func() string {
+			if s.c.BackupCompression == "none" {
+				return "tar"
+			}
+			return fmt.Sprintf("tar.%s", s.c.BackupCompression)
+		}(),
 	}); tErr != nil {
-		return nil, fmt.Errorf("newScript: error executing backup file extension template: %w", tErr)
+		return errwrap.Wrap(tErr, "error executing backup file extension template")
 	}
 	s.file = bf.String()

@@ -109,17 +109,21 @@ func newScript(c *Config) (*script, error) {
 	if !os.IsNotExist(err) || dockerHostSet {
 		cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
 		if err != nil {
-			return nil, fmt.Errorf("newScript: failed to create docker client")
+			return errwrap.Wrap(err, "failed to create docker client")
 		}
 		s.cli = cli
+		s.registerHook(hookLevelPlumbing, func(err error) error {
+			if err := s.cli.Close(); err != nil {
+				return errwrap.Wrap(err, "failed to close docker client")
+			}
+			return nil
+		})
 	}

 	logFunc := func(logType storage.LogLevel, context string, msg string, params ...any) {
 		switch logType {
 		case storage.LogLevelWarning:
 			s.logger.Warn(fmt.Sprintf(msg, params...), "storage", context)
-		case storage.LogLevelError:
-			s.logger.Error(fmt.Sprintf(msg, params...), "storage", context)
 		default:
 			s.logger.Info(fmt.Sprintf(msg, params...), "storage", context)
 		}
@@ -141,7 +145,7 @@ func newScript(c *Config) (*script, error) {
 		}
 		s3Backend, err := s3.NewStorageBackend(s3Config, logFunc)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: error creating s3 storage backend: %w", err)
+			return errwrap.Wrap(err, "error creating s3 storage backend")
 		}
 		s.storages = append(s.storages, s3Backend)
 	}
@@ -156,7 +160,7 @@ func newScript(c *Config) (*script, error) {
 		}
 		webdavBackend, err := webdav.NewStorageBackend(webDavConfig, logFunc)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: error creating webdav storage backend: %w", err)
+			return errwrap.Wrap(err, "error creating webdav storage backend")
 		}
 		s.storages = append(s.storages, webdavBackend)
 	}
@@ -173,7 +177,7 @@ func newScript(c *Config) (*script, error) {
 		}
 		sshBackend, err := ssh.NewStorageBackend(sshConfig, logFunc)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: error creating ssh storage backend: %w", err)
+			return errwrap.Wrap(err, "error creating ssh storage backend")
 		}
 		s.storages = append(s.storages, sshBackend)
 	}
@@ -194,10 +198,12 @@ func newScript(c *Config) (*script, error) {
 			PrimaryAccountKey: s.c.AzureStoragePrimaryAccountKey,
 			Endpoint:          s.c.AzureStorageEndpoint,
 			RemotePath:        s.c.AzureStoragePath,
+			ConnectionString:  s.c.AzureStorageConnectionString,
+			AccessTier:        s.c.AzureStorageAccessTier,
 		}
 		azureBackend, err := azure.NewStorageBackend(azureConfig, logFunc)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: error creating azure storage backend: %w", err)
+			return errwrap.Wrap(err, "error creating azure storage backend")
 		}
 		s.storages = append(s.storages, azureBackend)
 	}
@@ -214,7 +220,7 @@ func newScript(c *Config) (*script, error) {
 		}
 		dropboxBackend, err := dropbox.NewStorageBackend(dropboxConfig, logFunc)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: error creating dropbox storage backend: %w", err)
+			return errwrap.Wrap(err, "error creating dropbox storage backend")
 		}
 		s.storages = append(s.storages, dropboxBackend)
 	}
@@ -240,14 +246,14 @@ func newScript(c *Config) (*script, error) {

 	hookLevel, ok := hookLevels[s.c.NotificationLevel]
 	if !ok {
-		return nil, fmt.Errorf("newScript: unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel)
+		return errwrap.Wrap(nil, fmt.Sprintf("unknown NOTIFICATION_LEVEL %s", s.c.NotificationLevel))
 	}
 	s.hookLevel = hookLevel

 	if len(s.c.NotificationURLs) > 0 {
 		sender, senderErr := shoutrrr.CreateSender(s.c.NotificationURLs...)
 		if senderErr != nil {
-			return nil, fmt.Errorf("newScript: error creating sender: %w", senderErr)
+			return errwrap.Wrap(senderErr, "error creating sender")
 		}
 		s.sender = sender

@@ -255,13 +261,13 @@ func newScript(c *Config) (*script, error) {
 		tmpl.Funcs(templateHelpers)
 		tmpl, err = tmpl.Parse(defaultNotifications)
 		if err != nil {
-			return nil, fmt.Errorf("newScript: unable to parse default notifications templates: %w", err)
+			return errwrap.Wrap(err, "unable to parse default notifications templates")
 		}

 		if fi, err := os.Stat("/etc/dockervolumebackup/notifications.d"); err == nil && fi.IsDir() {
 			tmpl, err = tmpl.ParseGlob("/etc/dockervolumebackup/notifications.d/*.*")
 			if err != nil {
-				return nil, fmt.Errorf("newScript: unable to parse user defined notifications templates: %w", err)
+				return errwrap.Wrap(err, "unable to parse user defined notifications templates")
 			}
 		}
 		s.template = tmpl
@@ -282,211 +288,5 @@ func newScript(c *Config) (*script, error) {
 		})
 	}

-	return s, nil
-}
-
-// createArchive creates a tar archive of the configured backup location and
-// saves it to disk.
-func (s *script) createArchive() error {
-	backupSources := s.c.BackupSources
-
-	if s.c.BackupFromSnapshot {
-		s.logger.Warn(
-			"Using BACKUP_FROM_SNAPSHOT has been deprecated and will be removed in the next major version.",
-		)
-		s.logger.Warn(
-			"Please use `archive-pre` and `archive-post` commands to prepare your backup sources. Refer to the documentation for an upgrade guide.",
-		)
-		backupSources = filepath.Join("/tmp", s.c.BackupSources)
-		// copy before compressing guard against a situation where backup folder's content are still growing.
-		s.registerHook(hookLevelPlumbing, func(error) error {
-			if err := remove(backupSources); err != nil {
-				return fmt.Errorf("createArchive: error removing snapshot: %w", err)
-			}
-			s.logger.Info(
-				fmt.Sprintf("Removed snapshot `%s`.", backupSources),
-			)
-			return nil
-		})
-		if err := copy.Copy(s.c.BackupSources, backupSources, copy.Options{
-			PreserveTimes: true,
-			PreserveOwner: true,
-		}); err != nil {
-			return fmt.Errorf("createArchive: error creating snapshot: %w", err)
-		}
-		s.logger.Info(
-			fmt.Sprintf("Created snapshot of `%s` at `%s`.", s.c.BackupSources, backupSources),
-		)
-	}
-
-	tarFile := s.file
-	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := remove(tarFile); err != nil {
-			return fmt.Errorf("createArchive: error removing tar file: %w", err)
-		}
-		s.logger.Info(
-			fmt.Sprintf("Removed tar file `%s`.", tarFile),
-		)
-		return nil
-	})
-
-	backupPath, err := filepath.Abs(stripTrailingSlashes(backupSources))
-	if err != nil {
-		return fmt.Errorf("createArchive: error getting absolute path: %w", err)
-	}
-
-	var filesEligibleForBackup []string
-	if err := filepath.WalkDir(backupPath, func(path string, di fs.DirEntry, err error) error {
-		if err != nil {
-			return err
-		}
-
-		if s.c.BackupExcludeRegexp.Re != nil && s.c.BackupExcludeRegexp.Re.MatchString(path) {
-			return nil
-		}
-		filesEligibleForBackup = append(filesEligibleForBackup, path)
-		return nil
-	}); err != nil {
-		return fmt.Errorf("createArchive: error walking filesystem tree: %w", err)
-	}
-
-	if err := createArchive(filesEligibleForBackup, backupSources, tarFile, s.c.BackupCompression.String(), s.c.GzipParallelism.Int()); err != nil {
-		return fmt.Errorf("createArchive: error compressing backup folder: %w", err)
-	}
-
-	s.logger.Info(
-		fmt.Sprintf("Created backup of `%s` at `%s`.", backupSources, tarFile),
-	)
 	return nil
 }
-
-// encryptArchive encrypts the backup file using PGP and the configured passphrase.
-// In case no passphrase is given it returns early, leaving the backup file
-// untouched.
-func (s *script) encryptArchive() error {
-	if s.c.GpgPassphrase == "" {
-		return nil
-	}
-
-	gpgFile := fmt.Sprintf("%s.gpg", s.file)
-	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := remove(gpgFile); err != nil {
-			return fmt.Errorf("encryptArchive: error removing gpg file: %w", err)
-		}
-		s.logger.Info(
-			fmt.Sprintf("Removed GPG file `%s`.", gpgFile),
-		)
-		return nil
-	})
-
-	outFile, err := os.Create(gpgFile)
-	if err != nil {
-		return fmt.Errorf("encryptArchive: error opening out file: %w", err)
-	}
-	defer outFile.Close()
-
-	_, name := path.Split(s.file)
-	dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{
-		FileName: name,
-	}, nil)
-	if err != nil {
-		return fmt.Errorf("encryptArchive: error encrypting backup file: %w", err)
-	}
-	defer dst.Close()
-
-	src, err := os.Open(s.file)
-	if err != nil {
-		return fmt.Errorf("encryptArchive: error opening backup file `%s`: %w", s.file, err)
-	}
-
-	if _, err := io.Copy(dst, src); err != nil {
-		return fmt.Errorf("encryptArchive: error writing ciphertext to file: %w", err)
-	}
-
-	s.file = gpgFile
-	s.logger.Info(
-		fmt.Sprintf("Encrypted backup using given passphrase, saving as `%s`.", s.file),
-	)
-	return nil
-}
-
-// copyArchive makes sure the backup file is copied to both local and remote locations
-// as per the given configuration.
-func (s *script) copyArchive() error {
-	_, name := path.Split(s.file)
-	if stat, err := os.Stat(s.file); err != nil {
-		return fmt.Errorf("copyArchive: unable to stat backup file: %w", err)
-	} else {
-		size := stat.Size()
-		s.stats.BackupFile = BackupFileStats{
-			Size:     uint64(size),
-			Name:     name,
-			FullPath: s.file,
-		}
-	}
-
-	eg := errgroup.Group{}
-	for _, backend := range s.storages {
-		b := backend
-		eg.Go(func() error {
-			return b.Copy(s.file)
-		})
-	}
-	if err := eg.Wait(); err != nil {
-		return fmt.Errorf("copyArchive: error copying archive: %w", err)
-	}
-
-	return nil
-}
-
-// pruneBackups rotates away backups from local and remote storages using
-// the given configuration. In case the given configuration would delete all
-// backups, it does nothing instead and logs a warning.
-func (s *script) pruneBackups() error {
-	if s.c.BackupRetentionDays < 0 {
-		return nil
-	}
-
-	deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)
-
-	eg := errgroup.Group{}
-	for _, backend := range s.storages {
-		b := backend
-		eg.Go(func() error {
-			if skipPrune(b.Name(), s.c.BackupSkipBackendsFromPrune) {
-				s.logger.Info(
-					fmt.Sprintf("Skipping pruning for backend `%s`.", b.Name()),
-				)
-				return nil
-			}
-			stats, err := b.Prune(deadline, s.c.BackupPruningPrefix)
-			if err != nil {
-				return err
-			}
-			s.stats.Lock()
-			s.stats.Storages[b.Name()] = StorageStats{
-				Total:  stats.Total,
-				Pruned: stats.Pruned,
-			}
-			s.stats.Unlock()
-			return nil
-		})
-	}
-
-	if err := eg.Wait(); err != nil {
-		return fmt.Errorf("pruneBackups: error pruning backups: %w", err)
-	}
-
-	return nil
-}
-
-// skipPrune returns true if the given backend name is contained in the
-// list of skipped backends.
-func skipPrune(name string, skippedBackends []string) bool {
-	return slices.ContainsFunc(
-		skippedBackends,
-		func(b string) bool {
-			return strings.EqualFold(b, name) // ignore case on both sides
-		},
-	)
-}
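The init hunk above picks the archive extension with an immediately invoked closure, so a compression setting of "none" produces plain .tar instead of .tar.none. A standalone illustration of that template trick; the template text and variable names here are illustrative, not the repository's defaults:

package main

import (
	"bytes"
	"fmt"
	"os"
	"text/template"
)

func main() {
	compression := "none" // e.g. "gz", "zst", or "none"
	tmpl := template.Must(template.New("extension").Parse("backup.{{ .Extension }}"))
	var bf bytes.Buffer
	if err := tmpl.Execute(&bf, map[string]string{
		// An immediately invoked closure picks the extension, so "none"
		// yields plain "tar" instead of "tar.none".
		"Extension": func() string {
			if compression == "none" {
				return "tar"
			}
			return fmt.Sprintf("tar.%s", compression)
		}(),
	}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(bf.String()) // backup.tar
}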
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -1,3 +1,6 @@
+// Copyright 2024 - offen.software <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
 package main

 import (
@@ -11,28 +14,31 @@ import (

 	"github.com/docker/cli/cli/command/service/progress"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
 	ctr "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/api/types/system"
 	"github.com/docker/docker/client"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )

 func scaleService(cli *client.Client, serviceID string, replicas uint64) ([]string, error) {
 	service, _, err := cli.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
 	if err != nil {
-		return nil, fmt.Errorf("scaleService: error inspecting service %s: %w", serviceID, err)
+		return nil, errwrap.Wrap(err, fmt.Sprintf("error inspecting service %s", serviceID))
 	}
 	serviceMode := &service.Spec.Mode
 	switch {
 	case serviceMode.Replicated != nil:
 		serviceMode.Replicated.Replicas = &replicas
 	default:
-		return nil, fmt.Errorf("scaleService: service to be scaled %s has to be in replicated mode", service.Spec.Name)
+		return nil, errwrap.Wrap(nil, fmt.Sprintf("service to be scaled %s has to be in replicated mode", service.Spec.Name))
 	}

 	response, err := cli.ServiceUpdate(context.Background(), service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{})
 	if err != nil {
-		return nil, fmt.Errorf("scaleService: error updating service: %w", err)
+		return nil, errwrap.Wrap(err, "error updating service")
 	}

 	discardWriter := &noopWriteCloser{io.Discard}
@@ -51,21 +57,24 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i
 	for {
 		select {
 		case <-timeout.C:
-			return fmt.Errorf(
-				"awaitContainerCount: timed out after waiting %s for service %s to reach desired container count of %d",
-				timeoutAfter,
-				serviceID,
-				count,
+			return errwrap.Wrap(
+				nil,
+				fmt.Sprintf(
+					"timed out after waiting %s for service %s to reach desired container count of %d",
+					timeoutAfter,
+					serviceID,
+					count,
+				),
 			)
 		case <-poll.C:
-			containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
+			containers, err := cli.ContainerList(context.Background(), container.ListOptions{
 				Filters: filters.NewArgs(filters.KeyValuePair{
 					Key:   "label",
 					Value: fmt.Sprintf("com.docker.swarm.service.id=%s", serviceID),
 				}),
 			})
 			if err != nil {
-				return fmt.Errorf("awaitContainerCount: error listing containers: %w", err)
+				return errwrap.Wrap(err, "error listing containers")
 			}
 			if len(containers) == count {
 				return nil
@@ -74,6 +83,16 @@ func awaitContainerCountForService(cli *client.Client, serviceID string, count i
 	}
 }

+func isSwarm(c interface {
+	Info(context.Context) (system.Info, error)
+}) (bool, error) {
+	info, err := c.Info(context.Background())
+	if err != nil {
+		return false, errwrap.Wrap(err, "error getting docker info")
+	}
+	return info.Swarm.LocalNodeState != "" && info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive, nil
+}
+
 // stopContainersAndServices stops all Docker containers that are marked as to being
 // stopped during the backup and returns a function that can be called to
 // restart everything that has been stopped.
@@ -82,11 +101,10 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 		return noop, nil
 	}

-	dockerInfo, err := s.cli.Info(context.Background())
+	isDockerSwarm, err := isSwarm(s.cli)
 	if err != nil {
-		return noop, fmt.Errorf("(*script).stopContainersAndServices: error getting docker info: %w", err)
+		return noop, errwrap.Wrap(err, "error determining swarm state")
 	}
-	isDockerSwarm := dockerInfo.Swarm.LocalNodeState != "inactive"

 	labelValue := s.c.BackupStopDuringBackupLabel
 	if s.c.BackupStopContainerLabel != "" {
@@ -97,7 +115,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 			"Please use BACKUP_STOP_DURING_BACKUP_LABEL instead. Refer to the docs for an upgrade guide.",
 		)
 		if _, ok := os.LookupEnv("BACKUP_STOP_DURING_BACKUP_LABEL"); ok {
-			return noop, errors.New("(*script).stopContainersAndServices: both BACKUP_STOP_DURING_BACKUP_LABEL and BACKUP_STOP_CONTAINER_LABEL have been set, cannot continue")
+			return noop, errwrap.Wrap(nil, "both BACKUP_STOP_DURING_BACKUP_LABEL and BACKUP_STOP_CONTAINER_LABEL have been set, cannot continue")
 		}
 		labelValue = s.c.BackupStopContainerLabel
 	}
@@ -107,18 +125,18 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 		labelValue,
 	)

-	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{})
+	allContainers, err := s.cli.ContainerList(context.Background(), container.ListOptions{})
 	if err != nil {
-		return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers: %w", err)
+		return noop, errwrap.Wrap(err, "error querying for containers")
 	}
-	containersToStop, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
+	containersToStop, err := s.cli.ContainerList(context.Background(), container.ListOptions{
 		Filters: filters.NewArgs(filters.KeyValuePair{
 			Key:   "label",
 			Value: filterMatchLabel,
 		}),
 	})
 	if err != nil {
-		return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for containers to stop: %w", err)
+		return noop, errwrap.Wrap(err, "error querying for containers to stop")
 	}

 	var allServices []swarm.Service
@@ -126,7 +144,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 	if isDockerSwarm {
 		allServices, err = s.cli.ServiceList(context.Background(), types.ServiceListOptions{})
 		if err != nil {
-			return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services: %w", err)
+			return noop, errwrap.Wrap(err, "error querying for services")
 		}
 		matchingServices, err := s.cli.ServiceList(context.Background(), types.ServiceListOptions{
 			Filters: filters.NewArgs(filters.KeyValuePair{
@@ -135,15 +153,21 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 			}),
 			Status: true,
 		})
+		if err != nil {
+			return noop, errwrap.Wrap(err, "error querying for services to scale down")
+		}
 		for _, s := range matchingServices {
+			if s.Spec.Mode.Replicated == nil {
+				return noop, errwrap.Wrap(
+					nil,
+					fmt.Sprintf("only replicated services can be restarted, but found a label on service %s", s.Spec.Name),
+				)
+			}
 			servicesToScaleDown = append(servicesToScaleDown, handledSwarmService{
 				serviceID:           s.ID,
 				initialReplicaCount: *s.Spec.Mode.Replicated.Replicas,
 			})
 		}
-		if err != nil {
-			return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for services to scale down: %w", err)
-		}
 	}

 	if len(containersToStop) == 0 && len(servicesToScaleDown) == 0 {
@@ -155,14 +179,17 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 		if swarmServiceID, ok := container.Labels["com.docker.swarm.service.id"]; ok {
 			parentService, _, err := s.cli.ServiceInspectWithRaw(context.Background(), swarmServiceID, types.ServiceInspectOptions{})
 			if err != nil {
-				return noop, fmt.Errorf("(*script).stopContainersAndServices: error querying for parent service with ID %s: %w", swarmServiceID, err)
+				return noop, errwrap.Wrap(err, fmt.Sprintf("error querying for parent service with ID %s", swarmServiceID))
 			}
 			for label := range parentService.Spec.Labels {
 				if label == "docker-volume-backup.stop-during-backup" {
-					return noop, fmt.Errorf(
-						"(*script).stopContainersAndServices: container %s is labeled to stop but has parent service %s which is also labeled, cannot continue",
-						container.Names[0],
-						parentService.Spec.Name,
+					return noop, errwrap.Wrap(
+						nil,
+						fmt.Sprintf(
+							"container %s is labeled to stop but has parent service %s which is also labeled, cannot continue",
+							container.Names[0],
+							parentService.Spec.Name,
+						),
 					)
 				}
 			}
@@ -245,10 +272,12 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 	var initialErr error
 	allErrors := append(stopErrors, scaleDownErrors.value()...)
 	if len(allErrors) != 0 {
-		initialErr = fmt.Errorf(
-			"(*script).stopContainersAndServices: %d error(s) stopping containers: %w",
-			len(allErrors),
+		initialErr = errwrap.Wrap(
 			errors.Join(allErrors...),
+			fmt.Sprintf(
+				"%d error(s) stopping containers",
+				len(allErrors),
+			),
 		)
 	}

@@ -268,7 +297,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 		if err != nil {
 			restartErrors = append(
 				restartErrors,
-				fmt.Errorf("(*script).stopContainersAndServices: error looking up parent service: %w", err),
+				errwrap.Wrap(err, "error looking up parent service"),
 			)
 			continue
 		}
@@ -282,7 +311,7 @@ func (s *script) stopContainersAndServices() (func() error, error) {
 			continue
 		}

-		if err := s.cli.ContainerStart(context.Background(), container.ID, types.ContainerStartOptions{}); err != nil {
+		if err := s.cli.ContainerStart(context.Background(), container.ID, ctr.StartOptions{}); err != nil {
 			restartErrors = append(restartErrors, err)
 		}
 	}
@@ -311,10 +340,12 @@ func (s *script) stopContainersAndServices() (func() error, error) {

 	allErrors := append(restartErrors, scaleUpErrors.value()...)
 	if len(allErrors) != 0 {
-		return fmt.Errorf(
-			"(*script).stopContainersAndServices: %d error(s) restarting containers and services: %w",
-			len(allErrors),
+		return errwrap.Wrap(
 			errors.Join(allErrors...),
+			fmt.Sprintf(
+				"%d error(s) restarting containers and services",
+				len(allErrors),
+			),
 		)
 	}

85	cmd/backup/stop_restart_test.go	Normal file
@@ -0,0 +1,85 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/api/types/system"
+)
+
+type mockInfoClient struct {
+	result system.Info
+	err    error
+}
+
+func (m *mockInfoClient) Info(context.Context) (system.Info, error) {
+	return m.result, m.err
+}
+
+func TestIsSwarm(t *testing.T) {
+	tests := []struct {
+		name        string
+		client      *mockInfoClient
+		expected    bool
+		expectError bool
+	}{
+		{
+			"swarm",
+			&mockInfoClient{
+				result: system.Info{
+					Swarm: swarm.Info{
+						LocalNodeState: swarm.LocalNodeStateActive,
+					},
+				},
+			},
+			true,
+			false,
+		},
+		{
+			"compose",
+			&mockInfoClient{
+				result: system.Info{
+					Swarm: swarm.Info{
+						LocalNodeState: swarm.LocalNodeStateInactive,
+					},
+				},
+			},
+			false,
+			false,
+		},
+		{
+			"balena",
+			&mockInfoClient{
+				result: system.Info{
+					Swarm: swarm.Info{
+						LocalNodeState: "",
+					},
+				},
+			},
+			false,
+			false,
+		},
+		{
+			"error",
+			&mockInfoClient{
+				err: errors.New("the dinosaurs escaped"),
+			},
+			false,
+			true,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			result, err := isSwarm(test.client)
+			if (err != nil) != test.expectError {
+				t.Errorf("Unexpected error value %v", err)
+			}
+			if test.expected != result {
+				t.Errorf("Expected %v, got %v", test.expected, result)
+			}
+		})
+	}
+}
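
The helper under test is not itself part of the visible hunks. A minimal sketch of a compatible `isSwarm`, inferred purely from the test table above (hypothetical; the actual implementation lives elsewhere in `cmd/backup`):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/system"
)

// infoClient is the narrow interface the test exercises; the real Docker
// client satisfies it via its Info method.
type infoClient interface {
	Info(context.Context) (system.Info, error)
}

// isSwarm (sketch): only an active local node state counts as Swarm. An
// inactive state (plain Compose) or an empty one (e.g. Balena Engine,
// which reports no Swarm state at all) does not.
func isSwarm(c infoClient) (bool, error) {
	info, err := c.Info(context.Background())
	if err != nil {
		return false, fmt.Errorf("error getting docker info: %w", err)
	}
	return info.Swarm.LocalNodeState == swarm.LocalNodeStateActive, nil
}
```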
3	cmd/backup/testdata/braces.env	vendored	Normal file
@@ -0,0 +1,3 @@
+FOO=${bar:-qux}
+BAR=xxx
+BAZ=$NOPE

7	cmd/backup/testdata/comments.env	vendored	Normal file
@@ -0,0 +1,7 @@
+# This is a comment about `why` things are here
+# FOO="${bar:-qux}"
+# e.g. `backup-$HOSTNAME-%Y-%m-%dT%H-%M-%S.tar.gz`. Expansion happens before`
+
+BAR=xxx
+
+BAZ=$QUX

2	cmd/backup/testdata/default.env	vendored	Normal file
@@ -0,0 +1,2 @@
+FOO=bar
+BAZ=qux

4	cmd/backup/testdata/expansion.env	vendored	Normal file
@@ -0,0 +1,4 @@
+BAR=xxx
+FOO=${BAR}
+BAZ=$BAR
+QUX=${QUX}
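
These fixtures cover literal values, `$VAR` and `${VAR}` expansion, `${var:-default}` fallbacks, and commented-out lines. Judging by the `mvdan.cc/sh/v3` requirement added to `go.mod` further down, expansion of such env files is presumably delegated to that package; a minimal sketch of the expected semantics (the delegation itself is an assumption):

```go
package main

import (
	"fmt"

	"mvdan.cc/sh/v3/shell"
)

func main() {
	env := map[string]string{"BAR": "xxx"}
	lookup := func(name string) string { return env[name] }

	// ${bar:-qux} falls back to the default, since `bar` is unset
	expanded, err := shell.Expand("${bar:-qux}", lookup)
	if err != nil {
		panic(err)
	}
	fmt.Println(expanded) // "qux"

	// $BAR and ${BAR} resolve against the provided environment
	expanded, _ = shell.Expand("${BAR}", lookup)
	fmt.Println(expanded) // "xxx"
}
```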
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package main
@@ -9,6 +9,10 @@ import (
 	"io"
 	"os"
 	"sync"
+	"time"
+
+	"github.com/offen/docker-volume-backup/internal/errwrap"
+	"github.com/robfig/cron/v3"
 )

 var noop = func() error { return nil }
@@ -20,7 +24,7 @@ func remove(location string) error {
 	if os.IsNotExist(err) {
 		return nil
 	}
-	return fmt.Errorf("remove: error checking for existence of `%s`: %w", location, err)
+	return errwrap.Wrap(err, fmt.Sprintf("error checking for existence of `%s`", location))
 }
 if fi.IsDir() {
 	err = os.RemoveAll(location)
@@ -28,7 +32,7 @@ func remove(location string) error {
 	err = os.Remove(location)
 }
 if err != nil {
-	return fmt.Errorf("remove: error removing `%s`: %w", location, err)
+	return errwrap.Wrap(err, fmt.Sprintf("error removing `%s", location))
 }
 return nil
@@ -47,7 +51,7 @@ type bufferingWriter struct {

 func (b *bufferingWriter) Write(p []byte) (n int, err error) {
 	if n, err := b.buf.Write(p); err != nil {
-		return n, fmt.Errorf("(*bufferingWriter).Write: error writing to buffer: %w", err)
+		return n, errwrap.Wrap(err, "error writing to buffer")
 	}
 	return b.writer.Write(p)
 }
@@ -79,3 +83,22 @@ func (c *concurrentSlice[T]) append(v T) {
 func (c *concurrentSlice[T]) value() []T {
 	return c.val
 }
+
+// checkCronSchedule detects whether the given cron expression will actually
+// ever be executed or not.
+func checkCronSchedule(expression string) (ok bool) {
+	defer func() {
+		if err := recover(); err != nil {
+			ok = false
+		}
+	}()
+	sched, err := cron.ParseStandard(expression)
+	if err != nil {
+		ok = false
+		return
+	}
+	now := time.Now()
+	sched.Next(now) // panics when the cron would never run
+	ok = true
+	return
+}
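
A short usage sketch for the new `checkCronSchedule` helper: callers can validate a user-supplied expression up front instead of letting the scheduler fail later (illustrative only; same package as the helper above):

```go
package main

import "fmt"

func main() {
	// checkCronSchedule returns false for expressions that cannot be
	// parsed, and recovers from the panic raised when computing the
	// next run of a schedule that would never fire.
	if !checkCronSchedule("not a cron expression") {
		fmt.Println("refusing to register schedule")
	}
	if checkCronSchedule("0 2 * * *") {
		fmt.Println("daily at 2am is a valid schedule")
	}
}
```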
@@ -59,11 +59,13 @@ GEM
     rb-fsevent (0.11.2)
     rb-inotify (0.10.1)
       ffi (~> 1.0)
-    rexml (3.2.6)
+    rexml (3.3.3)
+      strscan
     rouge (3.30.0)
     safe_yaml (1.0.5)
     sassc (2.4.0)
       ffi (~> 1.9)
+    strscan (3.1.0)
     terminal-table (3.0.2)
       unicode-display_width (>= 1.1.1, < 3)
     unicode-display_width (2.4.2)
@@ -30,6 +30,6 @@ nav_external_links:
     url: https://github.com/offen/docker-volume-backup

 footer_content: >-
-  Copyright &copy; 2021 Offen Authors and contributors.
+  Copyright &copy; 2024 <a target="_blank" href="https://www.offen.software">offen.software</a> and contributors.
   Distributed under the <a href="https://github.com/offen/docker-volume-backup/tree/main/LICENSE">MPL-2.0 License.</a><br>
   Something missing, unclear or not working? Open <a href="https://github.com/offen/docker-volume-backup/issues">an issue</a>.
@@ -8,7 +8,7 @@ nav_order: 7
 # Encrypt backups using GPG

 The image supports encrypting backups using GPG out of the box.
-In case a `GPG_PASSPHRASE` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.gpg` file instead.
+In case a `GPG_PASSPHRASE` or `GPG_PUBLIC_KEY_RING` environment variable is set, the backup archive will be encrypted using the given key and saved as a `.gpg` file instead.

 Assuming you have `gpg` installed, you can decrypt such a backup using (your OS will prompt for the passphrase before decryption can happen):

@@ -2,7 +2,7 @@
 title: Replace deprecated BACKUP_FROM_SNAPSHOT usage
 layout: default
 parent: How Tos
-nav_order: 16
+nav_order: 17
 ---

 # Replace deprecated `BACKUP_FROM_SNAPSHOT` usage
@@ -2,7 +2,7 @@
 title: Replace deprecated BACKUP_STOP_CONTAINER_LABEL setting
 layout: default
 parent: How Tos
-nav_order: 19
+nav_order: 20
 ---

 # Replace deprecated `BACKUP_STOP_CONTAINER_LABEL` setting
@@ -2,7 +2,7 @@
 title: Replace deprecated exec-pre and exec-post labels
 layout: default
 parent: How Tos
-nav_order: 17
+nav_order: 18
 ---

 # Replace deprecated `exec-pre` and `exec-post` labels
@@ -9,6 +9,11 @@ parent: How Tos

 In certain scenarios it can be required to run specific commands before and after a backup is taken (e.g. dumping a database).
 When mounting the Docker socket into the `docker-volume-backup` container, you can define pre- and post-commands that will be run in the context of the target container (it is also possible to run commands inside the `docker-volume-backup` container itself using this feature).

+{: .important }
+In a multi-node Swarm setup, commands can currently only be run on the node the `offen/docker-volume-backup` container is running on.
+Labeled containers on other nodes are not visible to the backup command.
+
 Such commands are defined by specifying the command in a `docker-volume-backup.[step]-[pre|post]` label where `step` can be any of the following phases of a backup lifecycle:

 - `archive` (the tar archive is created)
@@ -46,6 +51,10 @@ If you have more than one `docker-volume-backup` container (possibly across seve
 multiple backup schedules, you will need to use `EXEC_LABEL` in the configuration and a `docker-volume-backup.exec-label` label on each
 container using custom commands to ensure that the commands are only run by the correct `docker-volume-backup` instance.

+{: .important }
+In case you use `EXEC_LABEL` together with configuration mounted from `conf.d`, it's important to understand that a distinct `EXEC_LABEL` __should be set in each configuration__.
+Otherwise, schedules that do not specify an `EXEC_LABEL` will still trigger commands on all containers with such labels, no matter whether they specify `docker-volume-backup.exec-label` or not.
+
 ```yml
 version: '3'

@@ -25,7 +25,7 @@ services:
 Notification backends other than email are also supported.
 Refer to the documentation of [shoutrrr][shoutrrr-docs] to find out about options and configuration.

-[shoutrrr-docs]: https://containrrr.dev/shoutrrr/0.7/services/overview/
+[shoutrrr-docs]: https://containrrr.dev/shoutrrr/v0.8/services/overview/

 {: .note }
 If you also want notifications on successful executions, set `NOTIFICATION_LEVEL` to `info`.
@@ -2,7 +2,7 @@
 title: Update deprecated email configuration
 layout: default
 parent: How Tos
-nav_order: 18
+nav_order: 19
 ---

 # Update deprecated email configuration
36	docs/how-tos/use-as-non-root.md	Normal file
@@ -0,0 +1,36 @@
+---
+title: Use the image as a non-root user
+layout: default
+parent: How Tos
+nav_order: 16
+---
+
+# Use the image as a non-root user
+
+{: .important }
+Running as a non-root user limits interaction with the Docker Daemon.
+If you want to stop and restart containers and services during backup, and the host's Docker daemon is running as root, you will also need to run this tool as root.
+
+By default, this image executes backups using the `root` user.
+In case you prefer to use a different user, you can use Docker's [`user`](https://docs.docker.com/engine/reference/run/#user) option, passing the user and group id:
+
+```console
+docker run --rm \
+  -v data:/backup/data \
+  --env AWS_ACCESS_KEY_ID="<xxx>" \
+  --env AWS_SECRET_ACCESS_KEY="<xxx>" \
+  --env AWS_S3_BUCKET_NAME="<xxx>" \
+  --entrypoint backup \
+  --user 1000:1000 \
+  offen/docker-volume-backup:v2
+```
+
+or in a compose file:
+
+```yml
+services:
+  backup:
+    image: offen/docker-volume-backup:v2
+    user: 1000:1000
+    # further configuration omitted ...
+```
@@ -88,7 +88,7 @@ docker run --rm \

 Alternatively, pass a `--env-file` in order to use a full config as described below.

-### Available image registries
+## Available image registries

 This Docker image is published to both Docker Hub and the GitHub container registry.
 Depending on your preferences and needs, you can reference both `offen/docker-volume-backup` as well as `ghcr.io/offen/docker-volume-backup`:
@@ -100,6 +100,11 @@ docker pull ghcr.io/offen/docker-volume-backup:v2

 Documentation references Docker Hub, but all examples will work using ghcr.io just as well.

+## Supported Engines
+
+This tool is developed and tested against the Docker CE engine exclusively.
+While it may work against different implementations (e.g. Balena Engine), there are no guarantees about support for non-Docker engines.
+
 ## Differences to `jareware/docker-volume-backup`

 This image is heavily inspired by `jareware/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
@@ -289,7 +289,7 @@ volumes:
   data:
 ```

-## Encrypting your backups using GPG
+## Encrypting your backups symmetrically using GPG

 ```yml
 version: '3'
@@ -311,6 +311,33 @@ volumes:
   data:
 ```

+## Encrypting your backups asymmetrically using GPG
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:v2
+    environment:
+      AWS_S3_BUCKET_NAME: backup-bucket
+      AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
+      AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+      GPG_PUBLIC_KEY_RING: |
+        -----BEGIN PGP PUBLIC KEY BLOCK-----
+
+        D/cIHu6GH/0ghlcUVSbgMg5RRI5QKNNKh04uLAPxr75mKwUg0xPUaWgyyrAChVBi
+        ...
+        -----END PGP PUBLIC KEY BLOCK-----
+    volumes:
+      - data:/backup/my-app-backup:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+
+volumes:
+  data:
+```
+
 ## Using mysqldump to prepare the backup

 ```yml
@@ -371,3 +398,24 @@ volumes:
   data_1:
   data_2:
 ```
+
+## Running as a non-root user
+
+```yml
+version: '3'
+
+services:
+  # ... define other services using the `data` volume here
+  backup:
+    image: offen/docker-volume-backup:v2
+    user: 1000:1000
+    environment:
+      AWS_S3_BUCKET_NAME: backup-bucket
+      AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE
+      AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+    volumes:
+      - data:/backup/my-app-backup:ro
+
+volumes:
+  data:
+```
@@ -43,8 +43,8 @@ You can populate below template according to your requirements and use it as you
 # BACKUP_CRON_EXPRESSION="0 2 * * *"

 # The compression algorithm used in conjunction with tar.
-# Valid options are: "gz" (Gzip) and "zst" (Zstd).
-# Note that the selection affects the file extension.
+# Valid options are: "gz" (Gzip), "zst" (Zstd) or "none" (tar only).
+# Default is "gz". Note that the selection affects the file extension.

 # BACKUP_COMPRESSION="gz"

@@ -60,7 +60,7 @@ You can populate below template according to your requirements and use it as you
 # will result in the same filename for every backup run, which means previous
 # versions will be overwritten on subsequent runs.
 # Extension can be defined literally or via "{{ .Extension }}" template,
-# in which case it will become either "tar.gz" or "tar.zst" (depending
+# in which case it will become either "tar.gz", "tar.zst" or ".tar" (depending
 # on your BACKUP_COMPRESSION setting).
 # The default results in filenames like: `backup-2021-08-29T04-00-00.tar.gz`.

@@ -245,10 +245,17 @@ You can populate below template according to your requirements and use it as you
 # AZURE_STORAGE_ACCOUNT_NAME="account-name"

 # The credential's primary account key when using Azure Blob Storage. If this
-# is not given, the command tries to fall back to using a managed identity.
+# is not given, the command tries to fall back to using a connection string
+# (if given) or a managed identity (if nothing is given).

 # AZURE_STORAGE_PRIMARY_ACCOUNT_KEY="<xxx>"

+# A connection string for accessing Azure Blob Storage. If this
+# is not given, the command tries to fall back to using a primary account key
+# (if given) or a managed identity (if nothing is given).
+
+# AZURE_STORAGE_CONNECTION_STRING="<xxx>"
+
 # The container name when using Azure Blob Storage.

 # AZURE_STORAGE_CONTAINER_NAME="container-name"
@@ -262,6 +269,11 @@ You can populate below template according to your requirements and use it as you
 # Note: Use your app's subpath in Dropbox, if it doesn't have global access.
 # Consult the README for further information.

+# The access tier when using Azure Blob Storage. Possible values are listed at:
+# https://github.com/Azure/azure-sdk-for-go/blob/sdk/storage/azblob/v1.3.2/sdk/storage/azblob/internal/generated/zz_constants.go#L14-L30
+
+# AZURE_STORAGE_ACCESS_TIER="Cold"
+
 # DROPBOX_REMOTE_PATH="/my/directory"

 # Number of concurrent chunked uploads for Dropbox.
@@ -325,10 +337,19 @@ You can populate below template according to your requirements and use it as you

 ########### BACKUP ENCRYPTION

-# Backups can be encrypted using gpg in case a passphrase is given.
+# Backups can be encrypted symmetrically using gpg in case a passphrase is given.

 # GPG_PASSPHRASE="<xxx>"

+# Backups can be encrypted asymmetrically using gpg in case public keys are given.
+
+# GPG_PUBLIC_KEY_RING= |
+#-----BEGIN PGP PUBLIC KEY BLOCK-----
+#
+#D/cIHu6GH/0ghlcUVSbgMg5RRI5QKNNKh04uLAPxr75mKwUg0xPUaWgyyrAChVBi
+#...
+#-----END PGP PUBLIC KEY BLOCK-----
+
 ########### STOPPING CONTAINERS AND SERVICES DURING BACKUP

 # Containers or services can be stopped by applying a
@@ -371,7 +392,7 @@ You can populate below template according to your requirements and use it as you

 # Notifications (email, Slack, etc.) can be sent out when a backup run finishes.
 # Configuration is provided as a comma-separated list of URLs as consumed
-# by `shoutrrr`: https://containrrr.dev/shoutrrr/0.7/services/overview/
+# by `shoutrrr`: https://containrrr.dev/shoutrrr/v0.8/services/overview/
 # The content of such notifications can be customized. Dedicated documentation
 # on how to do this can be found in the README. When providing multiple URLs or
 # an URL that contains a comma, the values can be URL encoded to avoid ambiguities.
79	go.mod
@@ -1,77 +1,84 @@
 module github.com/offen/docker-volume-backup

-go 1.21
+go 1.22

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
-	github.com/containrrr/shoutrrr v0.7.1
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0
+	github.com/containrrr/shoutrrr v0.8.0
 	github.com/cosiner/argv v0.1.0
-	github.com/docker/cli v24.0.1+incompatible
-	github.com/docker/docker v24.0.7+incompatible
-	github.com/gofrs/flock v0.8.1
+	github.com/docker/cli v27.1.1+incompatible
+	github.com/docker/docker v27.1.1+incompatible
+	github.com/gofrs/flock v0.12.1
 	github.com/joho/godotenv v1.5.1
-	github.com/klauspost/compress v1.17.6
+	github.com/klauspost/compress v1.17.9
 	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
-	github.com/minio/minio-go/v7 v7.0.66
+	github.com/minio/minio-go/v7 v7.0.74
 	github.com/offen/envconfig v1.5.0
 	github.com/otiai10/copy v1.14.0
 	github.com/pkg/sftp v1.13.6
-	github.com/robfig/cron/v3 v3.0.0
+	github.com/robfig/cron/v3 v3.0.1
 	github.com/studio-b12/gowebdav v0.9.0
-	golang.org/x/crypto v0.18.0
-	golang.org/x/oauth2 v0.16.0
-	golang.org/x/sync v0.6.0
+	golang.org/x/crypto v0.25.0
+	golang.org/x/oauth2 v0.22.0
+	golang.org/x/sync v0.8.0
+	mvdan.cc/sh/v3 v3.8.0
 )

 require (
 	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
 	github.com/cloudflare/circl v1.3.7 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/containerd/log v0.1.0 // indirect
+	github.com/distribution/reference v0.6.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/go-ini/ini v1.67.0 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/goccy/go-json v0.10.3 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/moby/docker-image-spec v1.3.1 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect
+	go.opentelemetry.io/otel v1.26.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect
+	go.opentelemetry.io/otel/metric v1.26.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.26.0 // indirect
+	go.opentelemetry.io/otel/trace v1.26.0 // indirect
 	golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.31.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
 )

 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/ProtonMail/go-crypto v1.1.0-alpha.0
-	github.com/docker/distribution v2.8.2+incompatible // indirect
+	github.com/ProtonMail/go-crypto v1.1.0-alpha.1
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.4.0 // indirect
 	github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
 	github.com/dustin/go-humanize v1.0.1 // indirect
-	github.com/fatih/color v1.13.0 // indirect
+	github.com/fatih/color v1.17.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/google/uuid v1.5.0 // indirect
-	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.8 // indirect
 	github.com/klauspost/pgzip v1.2.6
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.16 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/minio/md5-simd v1.1.2 // indirect
-	github.com/minio/sha256-simd v1.0.1 // indirect
 	github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
-	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/rs/xid v1.5.0 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
-	golang.org/x/net v0.20.0 // indirect
-	golang.org/x/sys v0.16.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
-	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
-	gopkg.in/ini.v1 v1.67.0 // indirect
+	golang.org/x/net v0.27.0 // indirect
+	golang.org/x/sys v0.22.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
 	gotest.tools/v3 v3.0.3 // indirect
 )
43	internal/errwrap/wrap.go	Normal file
@@ -0,0 +1,43 @@
+// Copyright 2024 - offen.software <hioffen@posteo.de>
+// SPDX-License-Identifier: MPL-2.0
+
+package errwrap
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+// Wrap wraps the given error using the given message while prepending
+// the name of the calling function, creating a poor man's stack trace
+func Wrap(err error, msg string) error {
+	pc := make([]uintptr, 15)
+	n := runtime.Callers(2, pc)
+	frames := runtime.CallersFrames(pc[:n])
+	frame, _ := frames.Next()
+	// strip full import paths and just use the package name
+	chunks := strings.Split(frame.Function, "/")
+	withCaller := fmt.Sprintf("%s: %s", chunks[len(chunks)-1], msg)
+	if err == nil {
+		return fmt.Errorf(withCaller)
+	}
+	return fmt.Errorf("%s: %w", withCaller, err)
+}
+
+// Unwrap receives an error and returns the last error in the chain of
+// wrapped errors
+func Unwrap(err error) error {
+	if err == nil {
+		return nil
+	}
+	for {
+		u := errors.Unwrap(err)
+		if u == nil {
+			break
+		}
+		err = u
+	}
+	return err
+}
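
How the new package is meant to be consumed, in a minimal, self-contained sketch (the `loadConfig` function is made up for illustration):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/offen/docker-volume-backup/internal/errwrap"
)

func loadConfig() error {
	// Wrap prepends the caller's name automatically, so call sites no
	// longer need to repeat "(*script).method:" style prefixes by hand.
	return errwrap.Wrap(errors.New("file not found"), "error reading config")
}

func main() {
	err := loadConfig()
	// prints "main.loadConfig: error reading config: file not found"
	fmt.Println(err)
	// Unwrap walks the chain and returns the innermost error
	fmt.Println(errwrap.Unwrap(err)) // "file not found"
}
```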
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0

 package azure
@@ -17,14 +17,18 @@ import (

 	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
 )

 type azureBlobStorage struct {
 	*storage.StorageBackend
 	client              *azblob.Client
-	containerName       string
+	uploadStreamOptions *blockblob.UploadStreamOptions
+	containerName       string
 }

 // Config contains values that define the configuration of an Azure Blob Storage.
@@ -32,19 +36,25 @@ type Config struct {
 	AccountName       string
 	ContainerName     string
 	PrimaryAccountKey string
+	ConnectionString  string
 	Endpoint          string
 	RemotePath        string
+	AccessTier        string
 }

 // NewStorageBackend creates and initializes a new Azure Blob Storage backend.
 func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
+	if opts.PrimaryAccountKey != "" && opts.ConnectionString != "" {
+		return nil, errwrap.Wrap(nil, "using primary account key and connection string are mutually exclusive")
+	}
+
 	endpointTemplate, err := template.New("endpoint").Parse(opts.Endpoint)
 	if err != nil {
-		return nil, fmt.Errorf("NewStorageBackend: error parsing endpoint template: %w", err)
+		return nil, errwrap.Wrap(err, "error parsing endpoint template")
 	}
 	var ep bytes.Buffer
 	if err := endpointTemplate.Execute(&ep, opts); err != nil {
-		return nil, fmt.Errorf("NewStorageBackend: error executing endpoint template: %w", err)
+		return nil, errwrap.Wrap(err, "error executing endpoint template")
 	}
 	normalizedEndpoint := fmt.Sprintf("%s/", strings.TrimSuffix(ep.String(), "/"))

@@ -52,27 +62,49 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 	if opts.PrimaryAccountKey != "" {
 		cred, err := azblob.NewSharedKeyCredential(opts.AccountName, opts.PrimaryAccountKey)
 		if err != nil {
-			return nil, fmt.Errorf("NewStorageBackend: error creating shared key Azure credential: %w", err)
+			return nil, errwrap.Wrap(err, "error creating shared key Azure credential")
 		}

 		client, err = azblob.NewClientWithSharedKeyCredential(normalizedEndpoint, cred, nil)
 		if err != nil {
-			return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
+			return nil, errwrap.Wrap(err, "error creating azure client from primary account key")
+		}
+	} else if opts.ConnectionString != "" {
+		client, err = azblob.NewClientFromConnectionString(opts.ConnectionString, nil)
+		if err != nil {
+			return nil, errwrap.Wrap(err, "error creating azure client from connection string")
 		}
 	} else {
 		cred, err := azidentity.NewManagedIdentityCredential(nil)
 		if err != nil {
-			return nil, fmt.Errorf("NewStorageBackend: error creating managed identity credential: %w", err)
+			return nil, errwrap.Wrap(err, "error creating managed identity credential")
 		}
 		client, err = azblob.NewClient(normalizedEndpoint, cred, nil)
 		if err != nil {
-			return nil, fmt.Errorf("NewStorageBackend: error creating Azure client: %w", err)
+			return nil, errwrap.Wrap(err, "error creating azure client from managed identity")
+		}
+	}
+
+	var uploadStreamOptions *blockblob.UploadStreamOptions
+	if opts.AccessTier != "" {
+		var found bool
+		for _, t := range blob.PossibleAccessTierValues() {
+			if string(t) == opts.AccessTier {
+				found = true
+				uploadStreamOptions = &blockblob.UploadStreamOptions{
+					AccessTier: &t,
+				}
+			}
+		}
+		if !found {
+			return nil, errwrap.Wrap(nil, fmt.Sprintf("%s is not a possible access tier value", opts.AccessTier))
 		}
 	}

 	storage := azureBlobStorage{
 		client:              client,
-		containerName:       opts.ContainerName,
+		uploadStreamOptions: uploadStreamOptions,
+		containerName:       opts.ContainerName,
 		StorageBackend: &storage.StorageBackend{
 			DestinationPath: opts.RemotePath,
 			Log:             logFunc,
@@ -90,17 +122,18 @@ func (b *azureBlobStorage) Name() string {
 func (b *azureBlobStorage) Copy(file string) error {
 	fileReader, err := os.Open(file)
 	if err != nil {
-		return fmt.Errorf("(*azureBlobStorage).Copy: error opening file %s: %w", file, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error opening file %s", file))
 	}

 	_, err = b.client.UploadStream(
 		context.Background(),
 		b.containerName,
 		filepath.Join(b.DestinationPath, filepath.Base(file)),
 		fileReader,
-		nil,
+		b.uploadStreamOptions,
 	)
 	if err != nil {
-		return fmt.Errorf("(*azureBlobStorage).Copy: error uploading file %s: %w", file, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error uploading file %s", file))
 	}
 	return nil
 }
@@ -117,7 +150,7 @@ func (b *azureBlobStorage) Prune(deadline time.Time, pruningPrefix string) (*sto
 	for pager.More() {
 		resp, err := pager.NextPage(context.Background())
 		if err != nil {
-			return nil, fmt.Errorf("(*azureBlobStorage).Prune: error paging over blobs: %w", err)
+			return nil, errwrap.Wrap(err, "error paging over blobs")
 		}
 		for _, v := range resp.Segment.BlobItems {
 			totalCount++
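
The access-tier handling above validates a user-supplied string against the SDK's enumeration before use. The same pattern in isolation, as a sketch using only the SDK calls visible in this hunk:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// validAccessTier checks a configured string against the SDK's list of
// known access tiers and returns the matching typed value.
func validAccessTier(v string) (*blob.AccessTier, bool) {
	for _, t := range blob.PossibleAccessTierValues() {
		if string(t) == v {
			// taking the address of the loop variable is safe with
			// go 1.22 per-iteration loop variables (see go.mod above)
			return &t, true
		}
	}
	return nil, false
}

func main() {
	if tier, ok := validAccessTier("Cold"); ok {
		fmt.Println("using access tier", *tier)
	} else {
		fmt.Println("not a possible access tier value")
	}
}
```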
@@ -14,6 +14,7 @@ import (

 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
 	"golang.org/x/oauth2"
 )
@@ -51,7 +52,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 	tkSource := conf.TokenSource(context.Background(), &oauth2.Token{RefreshToken: opts.RefreshToken})
 	token, err := tkSource.Token()
 	if err != nil {
-		return nil, fmt.Errorf("(*dropboxStorage).NewStorageBackend: Error refreshing token: %w", err)
+		return nil, errwrap.Wrap(err, "error refreshing token")
 	}

 	dbxConfig := dropbox.Config{
@@ -95,29 +96,28 @@ func (b *dropboxStorage) Copy(file string) error {
 	switch err := err.(type) {
 	case files.CreateFolderV2APIError:
 		if err.EndpointError.Path.Tag != files.WriteErrorConflict {
-			return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
+			return errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s'", b.DestinationPath))
 		}
 		b.Log(storage.LogLevelInfo, b.Name(), "Destination path '%s' already exists, no new directory required.", b.DestinationPath)
 	default:
-		return fmt.Errorf("(*dropboxStorage).Copy: Error creating directory '%s': %w", b.DestinationPath, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s'", b.DestinationPath))
 	}
 }

 r, err := os.Open(file)
 if err != nil {
-	return fmt.Errorf("(*dropboxStorage).Copy: Error opening the file to be uploaded: %w", err)
+	return errwrap.Wrap(err, "error opening the file to be uploaded")
 }
 defer r.Close()

 // Start new upload session and get session id

 b.Log(storage.LogLevelInfo, b.Name(), "Starting upload session for backup '%s' at path '%s'.", file, b.DestinationPath)

 var sessionId string
 uploadSessionStartArg := files.NewUploadSessionStartArg()
 uploadSessionStartArg.SessionType = &files.UploadSessionType{Tagged: dropbox.Tagged{Tag: files.UploadSessionTypeConcurrent}}
 if res, err := b.client.UploadSessionStart(uploadSessionStartArg, nil); err != nil {
-	return fmt.Errorf("(*dropboxStorage).Copy: Error starting the upload session: %w", err)
+	return errwrap.Wrap(err, "error starting the upload session")
 } else {
 	sessionId = res.SessionId
 }
@@ -165,7 +165,7 @@ loop:

 	bytesRead, err := r.Read(chunk)
 	if err != nil {
-		errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error reading the file to be uploaded: %w", err)
+		errorChn <- errwrap.Wrap(err, "error reading the file to be uploaded")
 		mu.Unlock()
 		return
 	}
@@ -184,7 +184,7 @@ loop:
 	mu.Unlock()

 	if err := b.client.UploadSessionAppendV2(uploadSessionAppendArg, bytes.NewReader(chunk)); err != nil {
-		errorChn <- fmt.Errorf("(*dropboxStorage).Copy: Error appending the file to the upload session: %w", err)
+		errorChn <- errwrap.Wrap(err, "error appending the file to the upload session")
 		return
 	}
 }()
@@ -198,7 +198,7 @@ loop:
 	files.NewCommitInfo(filepath.Join(b.DestinationPath, name)),
 ), nil)
 if err != nil {
-	return fmt.Errorf("(*dropboxStorage).Copy: Error finishing the upload session: %w", err)
+	return errwrap.Wrap(err, "error finishing the upload session")
 }

 b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' at path '%s'.", file, b.DestinationPath)
@@ -211,14 +211,14 @@ func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*stora
 	var entries []files.IsMetadata
 	res, err := b.client.ListFolder(files.NewListFolderArg(b.DestinationPath))
 	if err != nil {
-		return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
+		return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
 	}
 	entries = append(entries, res.Entries...)

 	for res.HasMore {
 		res, err = b.client.ListFolderContinue(files.NewListFolderContinueArg(res.Cursor))
 		if err != nil {
-			return nil, fmt.Errorf("(*webDavStorage).Prune: Error looking up candidates from remote storage: %w", err)
+			return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
 		}
 		entries = append(entries, res.Entries...)
 	}
@@ -248,7 +248,7 @@ func (b *dropboxStorage) Prune(deadline time.Time, pruningPrefix string) (*stora
 	pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error {
 		for _, match := range matches {
 			if _, err := b.client.DeleteV2(files.NewDeleteArg(filepath.Join(b.DestinationPath, match.Name))); err != nil {
-				return fmt.Errorf("(*dropboxStorage).Prune: Error removing file from Dropbox storage: %w", err)
+				return errwrap.Wrap(err, "error removing file from Dropbox storage")
 			}
 		}
 		return nil
@@ -1,4 +1,4 @@
|
|||||||
// Copyright 2022 - Offen Authors <hioffen@posteo.de>
|
// Copyright 2022 - offen.software <hioffen@posteo.de>
|
||||||
// SPDX-License-Identifier: MPL-2.0
|
// SPDX-License-Identifier: MPL-2.0
|
||||||
|
|
||||||
package local
|
package local
|
||||||
@@ -12,6 +12,7 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/offen/docker-volume-backup/internal/errwrap"
|
||||||
"github.com/offen/docker-volume-backup/internal/storage"
|
"github.com/offen/docker-volume-backup/internal/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -47,7 +48,7 @@ func (b *localStorage) Copy(file string) error {
|
|||||||
_, name := path.Split(file)
|
_, name := path.Split(file)
|
||||||
|
|
||||||
if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
|
if err := copyFile(file, path.Join(b.DestinationPath, name)); err != nil {
|
||||||
return fmt.Errorf("(*localStorage).Copy: Error copying file to archive: %w", err)
|
return errwrap.Wrap(err, "error copying file to archive")
|
||||||
}
|
}
|
||||||
b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in `%s`.", file, b.DestinationPath)
|
b.Log(storage.LogLevelInfo, b.Name(), "Stored copy of backup `%s` in `%s`.", file, b.DestinationPath)
|
||||||
|
|
||||||
@@ -57,7 +58,7 @@ func (b *localStorage) Copy(file string) error {
|
|||||||
os.Remove(symlink)
|
os.Remove(symlink)
|
||||||
}
|
}
|
||||||
if err := os.Symlink(name, symlink); err != nil {
|
if err := os.Symlink(name, symlink); err != nil {
|
||||||
return fmt.Errorf("(*localStorage).Copy: error creating latest symlink: %w", err)
|
return errwrap.Wrap(err, "error creating latest symlink")
|
||||||
}
|
}
|
||||||
b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
|
b.Log(storage.LogLevelInfo, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink)
|
||||||
}
|
}
|
||||||
@@ -73,10 +74,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
|
|||||||
)
|
)
|
||||||
globMatches, err := filepath.Glob(globPattern)
|
globMatches, err := filepath.Glob(globPattern)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, errwrap.Wrap(
|
||||||
"(*localStorage).Prune: Error looking up matching files using pattern %s: %w",
|
|
||||||
globPattern,
|
|
||||||
err,
|
err,
|
||||||
|
fmt.Sprintf(
|
||||||
|
"error looking up matching files using pattern %s",
|
||||||
|
globPattern,
|
||||||
|
),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -84,10 +87,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
|
|||||||
for _, candidate := range globMatches {
|
for _, candidate := range globMatches {
|
||||||
fi, err := os.Lstat(candidate)
|
fi, err := os.Lstat(candidate)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, errwrap.Wrap(
|
||||||
"(*localStorage).Prune: Error calling Lstat on file %s: %w",
|
|
||||||
candidate,
|
|
||||||
err,
|
err,
|
||||||
|
fmt.Sprintf(
|
||||||
|
"error calling Lstat on file %s",
|
||||||
|
candidate,
|
||||||
|
),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -100,10 +105,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
|
|||||||
for _, candidate := range candidates {
|
for _, candidate := range candidates {
|
||||||
fi, err := os.Stat(candidate)
|
fi, err := os.Stat(candidate)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, errwrap.Wrap(
|
||||||
"(*localStorage).Prune: Error calling stat on file %s: %w",
|
|
||||||
candidate,
|
|
||||||
err,
|
err,
|
||||||
|
fmt.Sprintf(
|
||||||
|
"error calling stat on file %s",
|
||||||
|
candidate,
|
||||||
|
),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
if fi.ModTime().Before(deadline) {
|
if fi.ModTime().Before(deadline) {
|
||||||
@@ -124,10 +131,12 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(removeErrors) != 0 {
|
if len(removeErrors) != 0 {
|
||||||
return fmt.Errorf(
|
return errwrap.Wrap(
|
||||||
"(*localStorage).Prune: %d error(s) deleting files, starting with: %w",
|
|
||||||
len(removeErrors),
|
|
||||||
errors.Join(removeErrors...),
|
errors.Join(removeErrors...),
|
||||||
|
fmt.Sprintf(
|
||||||
|
"%d error(s) deleting files",
|
||||||
|
len(removeErrors),
|
||||||
|
),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
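The hunks above, and the per-backend hunks that follow, all make the same change: call sites drop their hardcoded "(*localStorage).Copy:"-style prefixes and hand a plain message to a shared errwrap.Wrap helper, calling Wrap(err, msg) to annotate an existing error and Wrap(nil, msg) to create a fresh one. The helper's implementation is not part of this excerpt; the following is only a minimal sketch consistent with those call sites, on the assumption that it derives the calling function's name via the runtime instead of the dropped string prefixes:

package errwrap

import (
	"fmt"
	"runtime"
)

// Wrap annotates err with msg plus the calling function's name, so call
// sites no longer hardcode prefixes like "(*localStorage).Copy:". When
// err is nil, it creates a new error from msg alone.
// NOTE: hypothetical sketch inferred from the call sites in this diff,
// not the actual internal/errwrap implementation.
func Wrap(err error, msg string) error {
	caller := "unknown"
	if pc, _, _, ok := runtime.Caller(1); ok {
		if fn := runtime.FuncForPC(pc); fn != nil {
			caller = fn.Name()
		}
	}
	if err == nil {
		return fmt.Errorf("%s: %s", caller, msg)
	}
	return fmt.Errorf("%s: %s: %w", caller, msg, err)
}

Note how formatted messages are now built with fmt.Sprintf before being passed in, as the Prune hunks above show, since Wrap takes a plain string rather than a format string.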
@@ -1,4 +1,4 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0
 
 package s3
@@ -15,6 +15,7 @@ import (
 
 	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
 )
 
@@ -53,7 +54,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 	} else if opts.IamRoleEndpoint != "" {
 		creds = credentials.NewIAM(opts.IamRoleEndpoint)
 	} else {
-		return nil, errors.New("NewStorageBackend: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
+		return nil, errwrap.Wrap(nil, "AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
 	}
 
 	options := minio.Options{
@@ -63,12 +64,12 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 
 	transport, err := minio.DefaultTransport(true)
 	if err != nil {
-		return nil, fmt.Errorf("NewStorageBackend: failed to create default minio transport: %w", err)
+		return nil, errwrap.Wrap(err, "failed to create default minio transport")
 	}
 
 	if opts.EndpointInsecure {
 		if !options.Secure {
-			return nil, errors.New("NewStorageBackend: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
+			return nil, errwrap.Wrap(nil, "AWS_ENDPOINT_INSECURE = true is only meaningful for https")
 		}
 		transport.TLSClientConfig.InsecureSkipVerify = true
 	} else if opts.CACert != nil {
@@ -81,7 +82,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 
 	mc, err := minio.New(opts.Endpoint, &options)
 	if err != nil {
-		return nil, fmt.Errorf("NewStorageBackend: error setting up minio client: %w", err)
+		return nil, errwrap.Wrap(err, "error setting up minio client")
 	}
 
 	return &s3Storage{
@@ -112,12 +113,12 @@ func (b *s3Storage) Copy(file string) error {
 	if b.partSize > 0 {
 		srcFileInfo, err := os.Stat(file)
 		if err != nil {
-			return fmt.Errorf("(*s3Storage).Copy: error reading the local file: %w", err)
+			return errwrap.Wrap(err, "error reading the local file")
 		}
 
 		_, partSize, _, err := minio.OptimalPartInfo(srcFileInfo.Size(), uint64(b.partSize*1024*1024))
 		if err != nil {
-			return fmt.Errorf("(*s3Storage).Copy: error computing the optimal s3 part size: %w", err)
+			return errwrap.Wrap(err, "error computing the optimal s3 part size")
 		}
 
 		putObjectOptions.PartSize = uint64(partSize)
@@ -125,14 +126,17 @@ func (b *s3Storage) Copy(file string) error {
 
 	if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, putObjectOptions); err != nil {
 		if errResp := minio.ToErrorResponse(err); errResp.Message != "" {
-			return fmt.Errorf(
-				"(*s3Storage).Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
-				errResp.Message,
-				errResp.Code,
-				errResp.StatusCode,
+			return errwrap.Wrap(
+				nil,
+				fmt.Sprintf(
+					"error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d",
+					errResp.Message,
+					errResp.Code,
+					errResp.StatusCode,
+				),
 			)
 		}
-		return fmt.Errorf("(*s3Storage).Copy: error uploading backup to remote storage: %w", err)
+		return errwrap.Wrap(err, "error uploading backup to remote storage")
 	}
 
 	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket)
@@ -152,9 +156,9 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
 	for candidate := range candidates {
 		lenCandidates++
 		if candidate.Err != nil {
-			return nil, fmt.Errorf(
-				"(*s3Storage).Prune: error looking up candidates from remote storage! %w",
+			return nil, errwrap.Wrap(
 				candidate.Err,
+				"error looking up candidates from remote storage",
 			)
 		}
 		if candidate.LastModified.Before(deadline) {
@@ -1,10 +1,9 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0
 
 package ssh
 
 import (
-	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -13,6 +12,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
 	"github.com/pkg/sftp"
 	"golang.org/x/crypto/ssh"
@@ -47,20 +47,20 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 	if _, err := os.Stat(opts.IdentityFile); err == nil {
 		key, err := os.ReadFile(opts.IdentityFile)
 		if err != nil {
-			return nil, errors.New("NewStorageBackend: error reading the private key")
+			return nil, errwrap.Wrap(nil, "error reading the private key")
 		}
 
 		var signer ssh.Signer
 		if opts.IdentityPassphrase != "" {
 			signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(opts.IdentityPassphrase))
 			if err != nil {
-				return nil, errors.New("NewStorageBackend: error parsing the encrypted private key")
+				return nil, errwrap.Wrap(nil, "error parsing the encrypted private key")
 			}
 			authMethods = append(authMethods, ssh.PublicKeys(signer))
 		} else {
 			signer, err = ssh.ParsePrivateKey(key)
 			if err != nil {
-				return nil, errors.New("NewStorageBackend: error parsing the private key")
+				return nil, errwrap.Wrap(nil, "error parsing the private key")
 			}
 			authMethods = append(authMethods, ssh.PublicKeys(signer))
 		}
@@ -74,7 +74,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 	sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", opts.HostName, opts.Port), sshClientConfig)
 
 	if err != nil {
-		return nil, fmt.Errorf("NewStorageBackend: error creating ssh client: %w", err)
+		return nil, errwrap.Wrap(err, "error creating ssh client")
 	}
 	_, _, err = sshClient.SendRequest("keepalive", false, nil)
 	if err != nil {
@@ -87,7 +87,7 @@ func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error
 		sftp.MaxConcurrentRequestsPerFile(64),
 	)
 	if err != nil {
-		return nil, fmt.Errorf("NewStorageBackend: error creating sftp client: %w", err)
+		return nil, errwrap.Wrap(err, "error creating sftp client")
 	}
 
 	return &sshStorage{
@@ -111,13 +111,13 @@ func (b *sshStorage) Copy(file string) error {
 	source, err := os.Open(file)
 	_, name := path.Split(file)
 	if err != nil {
-		return fmt.Errorf("(*sshStorage).Copy: error reading the file to be uploaded: %w", err)
+		return errwrap.Wrap(err, " error reading the file to be uploaded")
 	}
 	defer source.Close()
 
 	destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name))
 	if err != nil {
-		return fmt.Errorf("(*sshStorage).Copy: error creating file: %w", err)
+		return errwrap.Wrap(err, "error creating file")
 	}
 	defer destination.Close()
 
@@ -127,27 +127,27 @@ func (b *sshStorage) Copy(file string) error {
 		if err == io.EOF {
 			tot, err := destination.Write(chunk[:num])
 			if err != nil {
-				return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
+				return errwrap.Wrap(err, "error uploading the file")
 			}
 
 			if tot != len(chunk[:num]) {
-				return errors.New("(*sshStorage).Copy: failed to write stream")
+				return errwrap.Wrap(nil, "failed to write stream")
 			}
 
 			break
 		}
 
 		if err != nil {
-			return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
+			return errwrap.Wrap(err, "error uploading the file")
 		}
 
 		tot, err := destination.Write(chunk[:num])
 		if err != nil {
-			return fmt.Errorf("(*sshStorage).Copy: error uploading the file: %w", err)
+			return errwrap.Wrap(err, "error uploading the file")
 		}
 
 		if tot != len(chunk[:num]) {
-			return fmt.Errorf("(*sshStorage).Copy: failed to write stream")
+			return errwrap.Wrap(nil, "failed to write stream")
 		}
 	}
 
@@ -160,7 +160,7 @@ func (b *sshStorage) Copy(file string) error {
 func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
 	candidates, err := b.sftpClient.ReadDir(b.DestinationPath)
 	if err != nil {
-		return nil, fmt.Errorf("(*sshStorage).Prune: error reading directory: %w", err)
+		return nil, errwrap.Wrap(err, "error reading directory")
 	}
 
 	var matches []string
@@ -181,7 +181,7 @@ func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.P
 	pruneErr := b.DoPrune(b.Name(), len(matches), len(candidates), deadline, func() error {
 		for _, match := range matches {
 			if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil {
-				return fmt.Errorf("(*sshStorage).Prune: error removing file: %w", err)
+				return errwrap.Wrap(err, "error removing file")
 			}
 		}
 		return nil
@@ -1,11 +1,12 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0
 
 package storage
 
 import (
-	"fmt"
 	"time"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 )
 
 // Backend is an interface for defining functions which all storage providers support.
@@ -26,7 +27,6 @@ type LogLevel int
 const (
 	LogLevelInfo LogLevel = iota
 	LogLevelWarning
-	LogLevelError
 )
 
 type Log func(logType LogLevel, context string, msg string, params ...any)
@@ -47,7 +47,7 @@ func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int,
 
 	formattedDeadline, err := deadline.Local().MarshalText()
 	if err != nil {
-		return fmt.Errorf("(*StorageBackend).DoPrune: error marshaling deadline: %w", err)
+		return errwrap.Wrap(err, "error marshaling deadline")
 	}
 	b.Log(LogLevelInfo, context,
 		"Pruned %d out of %d backups as they were older than the given deadline of %s.",
@@ -1,10 +1,9 @@
-// Copyright 2022 - Offen Authors <hioffen@posteo.de>
+// Copyright 2022 - offen.software <hioffen@posteo.de>
 // SPDX-License-Identifier: MPL-2.0
 
 package webdav
 
 import (
-	"errors"
 	"fmt"
 	"io/fs"
 	"net/http"
@@ -14,6 +13,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/offen/docker-volume-backup/internal/errwrap"
 	"github.com/offen/docker-volume-backup/internal/storage"
 	"github.com/studio-b12/gowebdav"
 )
@@ -36,14 +36,14 @@ type Config struct {
 // NewStorageBackend creates and initializes a new WebDav storage backend.
 func NewStorageBackend(opts Config, logFunc storage.Log) (storage.Backend, error) {
 	if opts.Username == "" || opts.Password == "" {
-		return nil, errors.New("NewStorageBackend: WEBDAV_URL is defined, but no credentials were provided")
+		return nil, errwrap.Wrap(nil, "WEBDAV_URL is defined, but no credentials were provided")
 	} else {
 		webdavClient := gowebdav.NewClient(opts.URL, opts.Username, opts.Password)
 
 		if opts.URLInsecure {
 			defaultTransport, ok := http.DefaultTransport.(*http.Transport)
 			if !ok {
-				return nil, errors.New("NewStorageBackend: unexpected error when asserting type for http.DefaultTransport")
+				return nil, errwrap.Wrap(nil, "unexpected error when asserting type for http.DefaultTransport")
 			}
 			webdavTransport := defaultTransport.Clone()
 			webdavTransport.TLSClientConfig.InsecureSkipVerify = opts.URLInsecure
@@ -69,16 +69,16 @@ func (b *webDavStorage) Name() string {
 func (b *webDavStorage) Copy(file string) error {
 	_, name := path.Split(file)
 	if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil {
-		return fmt.Errorf("(*webDavStorage).Copy: error creating directory '%s' on server: %w", b.DestinationPath, err)
+		return errwrap.Wrap(err, fmt.Sprintf("error creating directory '%s' on server", b.DestinationPath))
 	}
 
 	r, err := os.Open(file)
 	if err != nil {
-		return fmt.Errorf("(*webDavStorage).Copy: error opening the file to be uploaded: %w", err)
+		return errwrap.Wrap(err, "error opening the file to be uploaded")
 	}
 
 	if err := b.client.WriteStream(filepath.Join(b.DestinationPath, name), r, 0644); err != nil {
-		return fmt.Errorf("(*webDavStorage).Copy: error uploading the file: %w", err)
+		return errwrap.Wrap(err, "error uploading the file")
 	}
 	b.Log(storage.LogLevelInfo, b.Name(), "Uploaded a copy of backup '%s' to '%s' at path '%s'.", file, b.url, b.DestinationPath)
 
@@ -89,7 +89,7 @@ func (b *webDavStorage) Copy(file string) error {
 func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
 	candidates, err := b.client.ReadDir(b.DestinationPath)
 	if err != nil {
-		return nil, fmt.Errorf("(*webDavStorage).Prune: error looking up candidates from remote storage: %w", err)
+		return nil, errwrap.Wrap(err, "error looking up candidates from remote storage")
 	}
 	var matches []fs.FileInfo
 	var lenCandidates int
@@ -111,7 +111,7 @@ func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storag
 	pruneErr := b.DoPrune(b.Name(), len(matches), lenCandidates, deadline, func() error {
 		for _, match := range matches {
 			if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil {
-				return fmt.Errorf("(*webDavStorage).Prune: error removing file: %w", err)
+				return errwrap.Wrap(err, "error removing file")
 			}
 		}
 		return nil
@@ -1,9 +1,10 @@
-FROM docker:24-dind
+FROM docker:27-dind
 
 RUN apk add \
   coreutils \
  	curl \
  	gpg \
+	gpg-agent \
  	jq \
  	moreutils \
  	tar \
@@ -1,8 +1,6 @@
-version: '3'
-
 services:
   storage:
-    image: mcr.microsoft.com/azure-storage/azurite:3.26.0
+    image: mcr.microsoft.com/azure-storage/azurite:3.31.0
     volumes:
       - ${DATA_DIR:-./data}:/data
     command: azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --location /data
@@ -36,6 +34,7 @@ services:
       AZURE_STORAGE_CONTAINER_NAME: test-container
       AZURE_STORAGE_ENDPOINT: http://storage:10000/{{ .AccountName }}/
       AZURE_STORAGE_PATH: 'path/to/backup'
+      AZURE_STORAGE_ACCESS_TIER: Hot
      BACKUP_FILENAME: test.tar.gz
      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   minio:
     hostname: minio.local

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}

@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   database:
     image: mariadb:10.7
@@ -1,2 +1,6 @@
-BACKUP_FILENAME="conf.tar.gz"
+# This is a comment
+# NOT=$(docker ps -aq)
+# e.g. `backup-$HOSTNAME-%Y-%m-%dT%H-%M-%S.tar.gz`. Expansion happens before`
+
+NAME="$EXPANSION_VALUE"
 BACKUP_CRON_EXPRESSION="*/1 * * * *"

@@ -1,2 +1,3 @@
-BACKUP_FILENAME="other.tar.gz"
+NAME="other"
 BACKUP_CRON_EXPRESSION="*/1 * * * *"
+BACKUP_FILENAME="override-$NAME.tar.gz"

@@ -1,2 +1,2 @@
-BACKUP_FILENAME="never.tar.gz"
+NAME="never"
 BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?"
@@ -1,9 +1,11 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
     restart: always
+    environment:
+      BACKUP_FILENAME: $$NAME.tar.gz
+      BACKUP_FILENAME_EXPAND: 'true'
+      EXPANSION_VALUE: conf
     volumes:
       - ${LOCAL_DIR:-./local}:/archive
       - app_data:/backup/app_data:ro
@@ -20,7 +20,7 @@ if [ ! -f "$LOCAL_DIR/conf.tar.gz" ]; then
 fi
 pass "Config from file was used."
 
-if [ ! -f "$LOCAL_DIR/other.tar.gz" ]; then
+if [ ! -f "$LOCAL_DIR/override-other.tar.gz" ]; then
   fail "Run on same schedule did not succeed."
 fi
 pass "Run on same schedule succeeded."
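The fixture changes above exercise BACKUP_FILENAME_EXPAND: the compose file sets BACKUP_FILENAME to $$NAME.tar.gz (compose's escape for a literal $NAME), the env files define NAME, and the assertion now looks for override-other.tar.gz. As a rough illustration of the kind of expansion involved, Go's standard library can resolve such placeholders against the environment; this is a sketch only, with values mirroring the fixtures, not the project's actual code:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Values mirroring the test fixtures: NAME is itself derived from
	// EXPANSION_VALUE inside the sourced env file.
	os.Setenv("EXPANSION_VALUE", "conf")
	os.Setenv("NAME", os.Getenv("EXPANSION_VALUE"))

	// With expansion enabled, "$NAME.tar.gz" resolves to "conf.tar.gz".
	fmt.Println(os.ExpandEnv("$NAME.tar.gz"))
}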
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   openapi_mock:
     image: muonsoft/openapi-mock:0.3.9

@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
test/gpg-asym/docker-compose.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    restart: always
+    environment:
+      BACKUP_CRON_EXPRESSION: 0 0 5 31 2 ?
+      BACKUP_FILENAME: test.tar.gz
+      BACKUP_LATEST_SYMLINK: test-latest.tar.gz.gpg
+      BACKUP_RETENTION_DAYS: ${BACKUP_RETENTION_DAYS:-7}
+      GPG_PUBLIC_KEY_RING_FILE: /keys/public_key.asc
+    volumes:
+      - ${KEY_DIR:-.}/public_key.asc:/keys/public_key.asc
+      - ${LOCAL_DIR:-./local}:/archive
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/gpg-asym/run.sh (new executable file, 49 lines)
@@ -0,0 +1,49 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+export LOCAL_DIR=$(mktemp -d)
+
+export KEY_DIR=$(mktemp -d)
+
+export PASSPHRASE="test"
+
+gpg --batch --gen-key <<EOF
+Key-Type: RSA
+Key-Length: 4096
+Name-Real: offen
+Name-Email: docker-volume-backup@local
+Expire-Date: 0
+Passphrase: $PASSPHRASE
+%commit
+EOF
+
+gpg --export --armor --batch --yes --pinentry-mode loopback --passphrase $PASSPHRASE --output $KEY_DIR/public_key.asc
+
+docker compose up -d --quiet-pull
+sleep 5
+
+docker compose exec backup backup
+
+expect_running_containers "2"
+
+TMP_DIR=$(mktemp -d)
+
+gpg -d --pinentry-mode loopback --yes --passphrase $PASSPHRASE "$LOCAL_DIR/test.tar.gz.gpg" > "$LOCAL_DIR/decrypted.tar.gz"
+
+tar -xf "$LOCAL_DIR/decrypted.tar.gz" -C $TMP_DIR
+
+if [ ! -f $TMP_DIR/backup/app_data/offen.db ]; then
+  fail "Could not find expected file in untared archive."
+fi
+rm "$LOCAL_DIR/decrypted.tar.gz"
+
+pass "Found relevant files in decrypted and untared local backup."
+
+if [ ! -L "$LOCAL_DIR/test-latest.tar.gz.gpg" ]; then
+  fail "Could not find local symlink to latest encrypted backup."
+fi
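Taken together, the two new files above add an end-to-end test for asymmetric GPG encryption: the script generates a throwaway RSA key pair, mounts only the exported public key into the container via GPG_PUBLIC_KEY_RING_FILE, then verifies that the resulting test.tar.gz.gpg decrypts with the private key and that the BACKUP_LATEST_SYMLINK points at the encrypted archive.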
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}

@@ -1,5 +1,3 @@
-version: '3.8'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}

@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}

@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}
test/nonroot/01conf.env (new file, 7 lines)
@@ -0,0 +1,7 @@
+AWS_ACCESS_KEY_ID="test"
+AWS_SECRET_ACCESS_KEY="GMusLtUmILge2by+z890kQ"
+AWS_ENDPOINT="minio:9000"
+AWS_ENDPOINT_PROTO="http"
+AWS_S3_BUCKET_NAME="backup"
+BACKUP_CRON_EXPRESSION="0 0 5 31 2 ?"
+BACKUP_FILENAME="test.tar.gz"
test/nonroot/docker-compose.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
+services:
+  minio:
+    image: minio/minio:RELEASE.2020-08-04T23-10-51Z
+    environment:
+      MINIO_ROOT_USER: test
+      MINIO_ROOT_PASSWORD: test
+      MINIO_ACCESS_KEY: test
+      MINIO_SECRET_KEY: GMusLtUmILge2by+z890kQ
+    entrypoint: /bin/ash -c 'mkdir -p /data/backup && minio server /data'
+    volumes:
+      - ${LOCAL_DIR:-local}:/data
+
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    user: 1000:1000
+    depends_on:
+      - minio
+    restart: always
+    volumes:
+      - app_data:/backup/app_data:ro
+      - ./01conf.env:/etc/dockervolumebackup/conf.d/01conf.env
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/nonroot/run.sh (new executable file, 27 lines)
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+export LOCAL_DIR=$(mktemp -d)
+
+docker compose up -d --quiet-pull
+sleep 5
+
+docker compose logs backup
+
+# conf.d is used to confirm /etc files are also accessible for non-root users
+docker compose exec backup /bin/sh -c 'set -a; source /etc/dockervolumebackup/conf.d/01conf.env; set +a && backup'
+
+sleep 5
+
+expect_running_containers "3"
+
+if [ ! -f "$LOCAL_DIR/backup/test.tar.gz" ]; then
+  fail "Could not find archive."
+fi
+pass "Archive was created."
+
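The three new files above add a test for running the container as a non-root user (user: 1000:1000), sourcing its configuration from /etc/dockervolumebackup/conf.d to confirm that files under /etc stay readable without root.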
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}

@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   db:
     image: postgres:14-alpine

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   backup:
     image: offen/docker-volume-backup:${TEST_VERSION:-canary}

@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   minio:
     image: minio/minio:RELEASE.2020-08-04T23-10-51Z

@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   minio:
     image: minio/minio:RELEASE.2020-08-04T23-10-51Z

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   minio:
     image: minio/minio:RELEASE.2020-08-04T23-10-51Z

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   minio:
     image: minio/minio:RELEASE.2020-08-04T23-10-51Z

@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   ssh:
     image: linuxserver/openssh-server:version-8.6_p1-r3

@@ -1,8 +1,6 @@
-# Copyright 2020-2021 - Offen Authors <hioffen@posteo.de>
+# Copyright 2020-2021 - offen.software <hioffen@posteo.de>
 # SPDX-License-Identifier: Unlicense
 
-version: '3.8'
-
 services:
   minio:
     image: minio/minio:RELEASE.2020-08-04T23-10-51Z
test/tar/docker-compose.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
+services:
+  backup:
+    image: offen/docker-volume-backup:${TEST_VERSION:-canary}
+    restart: always
+    environment:
+      BACKUP_FILENAME: test.{{ .Extension }}
+      BACKUP_COMPRESSION: none
+    volumes:
+      - app_data:/backup/app_data:ro
+      - /var/run/docker.sock:/var/run/docker.sock
+      - ${LOCAL_DIR:-./local}:/archive
+
+  offen:
+    image: offen/offen:latest
+    labels:
+      - docker-volume-backup.stop-during-backup=true
+    volumes:
+      - app_data:/var/opt/offen
+
+volumes:
+  app_data:
test/tar/run.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+set -e
+
+cd "$(dirname "$0")"
+. ../util.sh
+current_test=$(basename $(pwd))
+
+export LOCAL_DIR=$(mktemp -d)
+
+docker compose up -d --quiet-pull
+sleep 5
+
+docker compose exec backup backup
+
+sleep 5
+
+expect_running_containers "2"
+
+tmp_dir=$(mktemp -d)
+tar -xvf "$LOCAL_DIR/test.tar" -C $tmp_dir
+if [ ! -f "$tmp_dir/backup/app_data/offen.db" ]; then
+  fail "Could not find expected file in untared archive."
+fi
+pass "Expected file was found."
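These two new files test BACKUP_COMPRESSION: none: the {{ .Extension }} template in BACKUP_FILENAME resolves to a bare tar extension in this mode, and the script untars the uncompressed test.tar to verify its contents.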
@@ -1,5 +1,3 @@
-version: '2.4'
-
 services:
   alpine:
     image: alpine:3.17.3
@@ -22,7 +22,7 @@ skip () {
 
 expect_running_containers () {
   if [ "$(docker ps -q | wc -l)" != "$1" ]; then
-    fail "Expected $1 containers to be running, instead seen: "$(docker ps -a | wc -l)""
+    fail "Expected $1 containers to be running, instead seen: "$(docker ps -q | wc -l)""
   fi
   pass "$1 containers running."
 }
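The util.sh fix aligns the failure message with the check itself: both now count running containers via docker ps -q, where the message previously reported the docker ps -a total, which includes stopped containers and could contradict the assertion.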
@@ -1,5 +1,3 @@
-version: '3'
-
 services:
   webdav:
     image: bytemark/webdav:2.4