Mirror of https://github.com/offen/docker-volume-backup.git
Synced 2025-12-06 17:38:01 +01:00

Compare commits: v2.0.0-alp ... v2.0.0-alp (5 commits)

7a75b725dc
8c46bd54aa
edefe69e6f
01d1a60bb7
f15379795f

@@ -79,7 +79,7 @@ AWS_S3_BUCKET_NAME="<xxx>"
 # that is expected to be bigger than the maximum difference of backups.
 # Valid values have a suffix of (s)econds, (m)inutes or (h)ours.
 
-# BACKUP_PRUNING_LEEWAY="10m"
+# BACKUP_PRUNING_LEEWAY="1m"
 
 # In case your target bucket or directory contains other files than the ones
 # managed by this container, you can limit the scope of rotation by setting
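The leeway value documented above is parsed with Go's time.ParseDuration in the rewritten init() further down, which is where the (s)econds/(m)inutes/(h)ours suffixes come from. A minimal standalone sketch, not part of this diff, with illustrative values:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Valid suffixes mirror the README: (s)econds, (m)inutes, (h)ours.
	for _, v := range []string{"30s", "1m", "10m", "2h"} {
		d, err := time.ParseDuration(v)
		if err != nil {
			panic(err)
		}
		fmt.Printf("BACKUP_PRUNING_LEEWAY=%q sleeps for %v before pruning\n", v, d)
	}
}
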
@@ -176,9 +176,9 @@ docker exec <container_ref> backup
 
 This image is heavily inspired by the `futurice/docker-volume-backup`. We decided to publish this image as a simpler and more lightweight alternative because of the following requirements:
 
-- The original image is based on `ubuntu`, making it very heavy. This version is roughly 1/3 in compressed size.
+- The original image is based on `ubuntu` and additional tools, making it very heavy. This version is roughly 1/25 in compressed size (it's ~12MB).
 - The original image uses a shell script, when this is written in Go.
-- The original image proposed to handle backup rotation through AWS S3 lifecycle policies. This image adds the option to rotate away old backups through the same command so this functionality can also be offered for non-AWS storage backends like MinIO. Local backups can also be pruned once they reach a certain age.
+- The original image proposed to handle backup rotation through AWS S3 lifecycle policies. This image adds the option to rotate away old backups through the same command so this functionality can also be offered for non-AWS storage backends like MinIO. Local copies of backups can also be pruned once they reach a certain age.
 - InfluxDB specific functionality was removed.
 - `arm64` and `arm/v7` architectures are supported.
 - Docker in Swarm mode is supported.
@@ -11,11 +11,9 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
-	"os/exec"
 	"path"
 	"path/filepath"
 	"strconv"
-	"strings"
 	"time"
 
 	"github.com/docker/docker/api/types"
@@ -23,6 +21,7 @@ import (
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/client"
 	"github.com/joho/godotenv"
+	"github.com/leekchan/timeutil"
 	minio "github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/credentials"
 	"github.com/sirupsen/logrus"
@@ -41,18 +40,26 @@ func main() {
 	s.must(s.copyBackup())
 	s.must(s.cleanBackup())
 	s.must(s.pruneOldBackups())
+	s.logger.Info("Finished running backup tasks.")
 }
 
 type script struct {
 	ctx context.Context
 	cli *client.Client
 	mc *minio.Client
 	logger *logrus.Logger
-	file string
-	bucket string
-	archive string
-	sources string
-	passphrase string
+
+	start time.Time
+
+	file string
+	bucket string
+	archive string
+	sources string
+	passphrase []byte
+	retentionDays *int
+	leeway *time.Duration
+	containerLabel string
+	pruningPrefix string
 }
 
 // lock opens a lockfile at the given location, keeping it locked until the
@@ -117,7 +124,27 @@ func (s *script) init() error {
 	s.file = path.Join("/tmp", file)
 	s.archive = os.Getenv("BACKUP_ARCHIVE")
 	s.sources = os.Getenv("BACKUP_SOURCES")
-	s.passphrase = os.Getenv("GPG_PASSPHRASE")
+	if v := os.Getenv("GPG_PASSPHRASE"); v != "" {
+		s.passphrase = []byte(v)
+	}
+	if v := os.Getenv("BACKUP_RETENTION_DAYS"); v != "" {
+		i, err := strconv.Atoi(v)
+		if err != nil {
+			return fmt.Errorf("init: error parsing BACKUP_RETENTION_DAYS as int: %w", err)
+		}
+		s.retentionDays = &i
+	}
+	if v := os.Getenv("BACKUP_PRUNING_LEEWAY"); v != "" {
+		d, err := time.ParseDuration(v)
+		if err != nil {
+			return fmt.Errorf("init: error parsing BACKUP_PRUNING_LEEWAY as duration: %w", err)
+		}
+		s.leeway = &d
+	}
+	s.containerLabel = os.Getenv("BACKUP_STOP_CONTAINER_LABEL")
+	s.pruningPrefix = os.Getenv("BACKUP_PRUNING_PREFIX")
+	s.start = time.Now()
 
 	return nil
 }
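The init() changes above read each optional setting once and store it in a pointer field, so an unset variable can be told apart from an explicit zero value. A standalone sketch of that pattern, not part of the diff and trimmed down for illustration:

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// nil means "not configured"; a non-nil pointer carries the parsed value.
	var retentionDays *int
	if v := os.Getenv("BACKUP_RETENTION_DAYS"); v != "" {
		i, err := strconv.Atoi(v)
		if err != nil {
			panic(err)
		}
		retentionDays = &i
	}

	if retentionDays == nil {
		fmt.Println("pruning disabled")
		return
	}
	fmt.Printf("pruning backups older than %d day(s)\n", *retentionDays)
}
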
@@ -126,8 +153,9 @@ func (s *script) init() error {
 // sure containers are being restarted if required.
 func (s *script) stopContainersAndRun(thunk func() error) error {
 	if s.cli == nil {
-		return nil
+		return thunk()
 	}
+
 	allContainers, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
 		Quiet: true,
 	})
@@ -137,7 +165,7 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 
 	containerLabel := fmt.Sprintf(
 		"docker-volume-backup.stop-during-backup=%s",
-		os.Getenv("BACKUP_STOP_CONTAINER_LABEL"),
+		s.containerLabel,
 	)
 	containersToStop, err := s.cli.ContainerList(s.ctx, types.ContainerListOptions{
 		Quiet: true,
@@ -151,7 +179,7 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 		return fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
 	}
 	s.logger.Infof(
-		"Stopping %d containers labeled `%s` out of %d running containers.",
+		"Stopping %d containers labeled `%s` out of %d running container(s).",
 		len(containersToStop),
 		containerLabel,
 		len(allContainers),
@@ -214,7 +242,7 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 				err,
 			)
 		}
-		s.logger.Infof("Successfully restarted %d containers.", len(stoppedContainers))
+		s.logger.Infof("Restarted %d container(s) and the matching service(s).", len(stoppedContainers))
 		return nil
 	}()
 
@@ -236,15 +264,11 @@ func (s *script) stopContainersAndRun(thunk func() error) error {
 // takeBackup creates a tar archive of the configured backup location and
 // saves it to disk.
 func (s *script) takeBackup() error {
-	outBytes, err := exec.Command("date", fmt.Sprintf("+%s", s.file)).Output()
-	if err != nil {
-		return fmt.Errorf("takeBackup: error formatting filename template: %w", err)
-	}
-	s.file = strings.TrimSpace(string(outBytes))
+	s.file = timeutil.Strftime(&s.start, s.file)
 	if err := targz.Compress(s.sources, s.file); err != nil {
 		return fmt.Errorf("takeBackup: error compressing backup folder: %w", err)
 	}
-	s.logger.Infof("Successfully created backup of `%s` at `%s`.", s.sources, s.file)
+	s.logger.Infof("Created backup of `%s` at `%s`.", s.sources, s.file)
 	return nil
 }
 
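The takeBackup() change swaps the external `date` call for in-process template expansion via github.com/leekchan/timeutil. A minimal sketch of that call, assuming the module is available and using the BACKUP_FILENAME default from this changeset as the template:

package main

import (
	"fmt"
	"time"

	"github.com/leekchan/timeutil"
)

func main() {
	start := time.Now()
	// Same call shape as the diff: the strftime-style template is expanded in-process.
	file := timeutil.Strftime(&start, "backup-%Y-%m-%dT%H-%M-%S.tar.gz")
	fmt.Println(file) // e.g. backup-2021-08-23T10-00-00.tar.gz
}
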
@@ -252,7 +276,7 @@ func (s *script) takeBackup() error {
 // In case no passphrase is given it returns early, leaving the backup file
 // untouched.
 func (s *script) encryptBackup() error {
-	if s.passphrase == "" {
+	if s.passphrase == nil {
 		return nil
 	}
 
@@ -286,8 +310,9 @@ func (s *script) encryptBackup() error {
 	if err := os.Remove(s.file); err != nil {
 		return fmt.Errorf("encryptBackup: error removing unencrpyted backup: %w", err)
 	}
+
 	s.file = gpgFile
-	s.logger.Infof("Successfully encrypted backup using given passphrase, saving as `%s`.", s.file)
+	s.logger.Infof("Encrypted backup using given passphrase, saving as `%s`.", s.file)
 	return nil
 }
 
@@ -302,16 +327,14 @@ func (s *script) copyBackup() error {
 		if err != nil {
 			return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
 		}
-		s.logger.Infof("Successfully uploaded a copy of backup `%s` to bucket `%s`", s.file, s.bucket)
+		s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`", s.file, s.bucket)
 	}
 
-	if s.archive != "" {
-		if _, err := os.Stat(s.archive); !os.IsNotExist(err) {
-			if err := copy(s.file, path.Join(s.archive, name)); err != nil {
-				return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
-			}
-		}
-		s.logger.Infof("Successfully stored copy of backup `%s` in local archive `%s`", s.file, s.archive)
+	if _, err := os.Stat(s.archive); !os.IsNotExist(err) {
+		if err := copy(s.file, path.Join(s.archive, name)); err != nil {
+			return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
+		}
+		s.logger.Infof("Stored copy of backup `%s` in local archive `%s`", s.file, s.archive)
 	}
 	return nil
 }
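copyBackup() now keys the local-archive branch off whether the directory actually exists rather than whether BACKUP_ARCHIVE is non-empty. A small standalone sketch of that check, with an illustrative path:

package main

import (
	"fmt"
	"os"
)

func main() {
	archive := "/archive"
	// err == nil (the directory exists) and "some other error" both pass this
	// check; only a confirmed "does not exist" skips the local copy.
	if _, err := os.Stat(archive); !os.IsNotExist(err) {
		fmt.Printf("would store a copy of the backup in %s\n", archive)
		return
	}
	fmt.Printf("%s is not mounted, skipping local archive\n", archive)
}
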
@@ -321,7 +344,7 @@ func (s *script) cleanBackup() error {
 	if err := os.Remove(s.file); err != nil {
 		return fmt.Errorf("cleanBackup: error removing file: %w", err)
 	}
-	s.logger.Info("Successfully cleaned up local artifacts.")
+	s.logger.Info("Cleaned up local artifacts.")
 	return nil
 }
 
@@ -329,29 +352,22 @@ func (s *script) cleanBackup() error {
 // the given configuration. In case the given configuration would delete all
 // backups, it does nothing instead.
 func (s *script) pruneOldBackups() error {
-	retention := os.Getenv("BACKUP_RETENTION_DAYS")
-	if retention == "" {
+	if s.retentionDays == nil {
 		return nil
 	}
-	retentionDays, err := strconv.Atoi(retention)
-	if err != nil {
-		return fmt.Errorf("pruneOldBackups: error parsing BACKUP_RETENTION_DAYS as int: %w", err)
-	}
-	leeway := os.Getenv("BACKUP_PRUNING_LEEWAY")
-	sleepFor, err := time.ParseDuration(leeway)
-	if err != nil {
-		return fmt.Errorf("pruneBackups: error parsing given leeway value: %w", err)
-	}
-	s.logger.Infof("Sleeping for %s before pruning backups.", leeway)
-	time.Sleep(sleepFor)
 
-	s.logger.Infof("Trying to prune backups older than %d days now.", retentionDays)
-	deadline := time.Now().AddDate(0, 0, -retentionDays)
+	if s.leeway != nil {
+		s.logger.Infof("Sleeping for %s before pruning backups.", s.leeway)
+		time.Sleep(*s.leeway)
+	}
+
+	s.logger.Infof("Trying to prune backups older than %d day(s) now.", *s.retentionDays)
+	deadline := s.start.AddDate(0, 0, -*s.retentionDays)
 
 	if s.bucket != "" {
 		candidates := s.mc.ListObjects(s.ctx, s.bucket, minio.ListObjectsOptions{
 			WithMetadata: true,
-			Prefix: os.Getenv("BACKUP_PRUNING_PREFIX"),
+			Prefix: s.pruningPrefix,
 		})
 
 		var matches []minio.ObjectInfo
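pruneOldBackups() now derives its cutoff from the script's recorded start time and the parsed retention setting. A standalone sketch of the deadline arithmetic, with illustrative 7-day and 10-day values:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	retentionDays := 7
	// Same calculation as the diff: walk the date back by the retention period.
	deadline := start.AddDate(0, 0, -retentionDays)

	// A backup older than the deadline would be a pruning candidate.
	backupTime := start.AddDate(0, 0, -10)
	fmt.Println("prune candidate:", backupTime.Before(deadline))
}
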
@@ -384,13 +400,13 @@ func (s *script) pruneOldBackups() error {
 
 		if len(errors) != 0 {
 			return fmt.Errorf(
-				"pruneOldBackups: %d errors removing files from remote storage: %w",
+				"pruneOldBackups: %d error(s) removing files from remote storage: %w",
 				len(errors),
 				errors[0],
 			)
 		}
 		s.logger.Infof(
-			"Successfully pruned %d out of %d remote backups as their age exceeded the configured retention period.",
+			"Pruned %d out of %d remote backup(s) as their age exceeded the configured retention period.",
 			len(matches),
 			lenCandidates,
 		)
@@ -400,13 +416,13 @@ func (s *script) pruneOldBackups() error {
 				len(matches),
 			)
 		} else {
-			s.logger.Infof("None of %d remote backups were pruned.", lenCandidates)
+			s.logger.Infof("None of %d remote backup(s) were pruned.", lenCandidates)
 		}
 	}
 
-	if s.archive != "" {
+	if _, err := os.Stat(s.archive); !os.IsNotExist(err) {
 		candidates, err := filepath.Glob(
-			path.Join(s.archive, fmt.Sprintf("%s*", os.Getenv("BACKUP_PRUNING_PREFIX"))),
+			path.Join(s.archive, fmt.Sprintf("%s*", s.pruningPrefix)),
 		)
 		if err != nil {
 			return fmt.Errorf(
@@ -439,13 +455,13 @@ func (s *script) pruneOldBackups() error {
 		}
 		if len(errors) != 0 {
 			return fmt.Errorf(
-				"pruneOldBackups: %d errors deleting local files, starting with: %w",
+				"pruneOldBackups: %d error(s) deleting local files, starting with: %w",
 				len(errors),
 				errors[0],
 			)
 		}
 		s.logger.Infof(
-			"Successfully pruned %d out of %d local backups as their age exceeded the configured retention period.",
+			"Pruned %d out of %d local backup(s) as their age exceeded the configured retention period.",
 			len(matches),
 			len(candidates),
 		)
@@ -455,7 +471,7 @@ func (s *script) pruneOldBackups() error {
 			len(matches),
 		)
 	} else {
-		s.logger.Infof("None of %d local backups were pruned.", len(candidates))
+		s.logger.Infof("None of %d local backup(s) were pruned.", len(candidates))
 	}
 	}
 	return nil
@@ -17,7 +17,7 @@ BACKUP_FILENAME="${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}"
 BACKUP_ARCHIVE="${BACKUP_ARCHIVE:-/archive}"
 
 BACKUP_RETENTION_DAYS="${BACKUP_RETENTION_DAYS:-}"
-BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-10m}"
+BACKUP_PRUNING_LEEWAY="${BACKUP_PRUNING_LEEWAY:-1m}"
 BACKUP_PRUNING_PREFIX="${BACKUP_PRUNING_PREFIX:-}"
 BACKUP_STOP_CONTAINER_LABEL="${BACKUP_STOP_CONTAINER_LABEL:-true}"
 
go.mod (3 changed lines)

@@ -5,7 +5,9 @@ go 1.17
 require (
 	github.com/docker/docker v20.10.8+incompatible
 	github.com/joho/godotenv v1.3.0
+	github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d
 	github.com/minio/minio-go/v7 v7.0.12
+	github.com/sirupsen/logrus v1.8.1
 	github.com/walle/targz v0.0.0-20140417120357-57fe4206da5a
 	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
 )
@@ -32,7 +34,6 @@ require (
 	github.com/opencontainers/image-spec v1.0.1 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/rs/xid v1.2.1 // indirect
-	github.com/sirupsen/logrus v1.8.1 // indirect
 	golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 // indirect
 	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 // indirect
 	golang.org/x/text v0.3.4 // indirect
go.sum (2 changed lines)

@@ -401,6 +401,8 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d h1:2puqoOQwi3Ai1oznMOsFIbifm6kIfJaLLyYzWD4IzTs=
+github.com/leekchan/timeutil v0.0.0-20150802142658-28917288c48d/go.mod h1:hO90vCP2x3exaSH58BIAowSKvV+0OsY21TtzuFGHON4=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=