Merge pull request #252 from skriss/delete-backup
add delete backup cmd using finalizer and simplify GC process

commit 6b0b6373da
@@ -419,13 +419,13 @@

 [[projects]]
   name = "k8s.io/kubernetes"
-  packages = ["pkg/printers"]
+  packages = ["pkg/printers","pkg/util/version"]
   revision = "bdaeafa71f6c7c04636251031f93464384d54963"
   version = "v1.8.2"

 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "c3cd1b703421685e5b2343ced6eaa6ec958b9c44d62277322f4c93de164c2d04"
+  inputs-digest = "cd582891fb7e89c2ea28ea41e52687e2902b6105c6c3bf989bd628ebb4b72208"
   solver-name = "gps-cdcl"
   solver-version = 1
@@ -30,6 +30,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.
 ### SEE ALSO
 * [ark backup](ark_backup.md) - Work with backups
 * [ark create](ark_create.md) - Create ark resources
+* [ark delete](ark_delete.md) - Delete ark resources
 * [ark describe](ark_describe.md) - Describe ark resources
 * [ark get](ark_get.md) - Get ark resources
 * [ark plugin](ark_plugin.md) - Work with plugins
@@ -29,6 +29,7 @@ Work with backups
 ### SEE ALSO
 * [ark](ark.md) - Back up and restore Kubernetes cluster resources.
 * [ark backup create](ark_backup_create.md) - Create a backup
+* [ark backup delete](ark_backup_delete.md) - Delete a backup
 * [ark backup describe](ark_backup_describe.md) - Describe backups
 * [ark backup download](ark_backup_download.md) - Download a backup
 * [ark backup get](ark_backup_get.md) - Get backups
@@ -0,0 +1,35 @@
+## ark backup delete
+
+Delete a backup
+
+### Synopsis
+
+
+Delete a backup
+
+```
+ark backup delete NAME [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for delete
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark backup](ark_backup.md) - Work with backups
+
@@ -0,0 +1,34 @@
+## ark delete
+
+Delete ark resources
+
+### Synopsis
+
+
+Delete ark resources
+
+### Options
+
+```
+  -h, --help   help for delete
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
+* [ark delete backup](ark_delete_backup.md) - Delete a backup
+* [ark delete restore](ark_delete_restore.md) - Delete a restore
+* [ark delete schedule](ark_delete_schedule.md) - Delete a schedule
+
@@ -0,0 +1,35 @@
+## ark delete backup
+
+Delete a backup
+
+### Synopsis
+
+
+Delete a backup
+
+```
+ark delete backup NAME [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for backup
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark delete](ark_delete.md) - Delete ark resources
+
@@ -0,0 +1,35 @@
+## ark delete restore
+
+Delete a restore
+
+### Synopsis
+
+
+Delete a restore
+
+```
+ark delete restore NAME [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for restore
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark delete](ark_delete.md) - Delete ark resources
+
@@ -0,0 +1,35 @@
+## ark delete schedule
+
+Delete a schedule
+
+### Synopsis
+
+
+Delete a schedule
+
+```
+ark delete schedule NAME [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for schedule
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark delete](ark_delete.md) - Delete ark resources
+
@@ -24,6 +24,7 @@ import (
     "github.com/heptio/ark/pkg/client"
     "github.com/heptio/ark/pkg/cmd/cli/backup"
     "github.com/heptio/ark/pkg/cmd/cli/create"
+    "github.com/heptio/ark/pkg/cmd/cli/delete"
     "github.com/heptio/ark/pkg/cmd/cli/describe"
     "github.com/heptio/ark/pkg/cmd/cli/get"
     "github.com/heptio/ark/pkg/cmd/cli/plugin"
@@ -61,6 +62,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.`
         create.NewCommand(f),
         runplugin.NewCommand(),
         plugin.NewCommand(f),
+        delete.NewCommand(f),
     )

     // add the glog flags
@@ -35,10 +35,7 @@ func NewCommand(f client.Factory) *cobra.Command {
         NewLogsCommand(f),
         NewDescribeCommand(f, "describe"),
         NewDownloadCommand(f),
+        NewDeleteCommand(f, "delete"),
-        // If you delete a backup and it still exists in object storage, the backup sync controller will
-        // recreate it. Until we have a good UX around this, we're disabling the delete command.
-        // NewDeleteCommand(f),
     )

     return c
@@ -20,16 +20,19 @@ import (
     "fmt"
     "os"

+    "github.com/pkg/errors"
     "github.com/spf13/cobra"

     api "github.com/heptio/ark/pkg/apis/ark/v1"
     "github.com/heptio/ark/pkg/client"
     "github.com/heptio/ark/pkg/cmd"
+    "github.com/heptio/ark/pkg/controller"
+    kubeutil "github.com/heptio/ark/pkg/util/kube"
 )

-func NewDeleteCommand(f client.Factory) *cobra.Command {
+func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
     c := &cobra.Command{
-        Use:   "delete NAME",
+        Use:   fmt.Sprintf("%s NAME", use),
         Short: "Delete a backup",
         Run: func(c *cobra.Command, args []string) {
             if len(args) != 1 {
@@ -37,6 +40,16 @@ func NewDeleteCommand(f client.Factory) *cobra.Command {
                 os.Exit(1)
             }

+            kubeClient, err := f.KubeClient()
+            cmd.CheckError(err)
+
+            serverVersion, err := kubeutil.ServerVersion(kubeClient.Discovery())
+            cmd.CheckError(err)
+
+            if !serverVersion.AtLeast(controller.MinVersionForDelete) {
+                cmd.CheckError(errors.Errorf("this command requires the Kubernetes server version to be at least %s", controller.MinVersionForDelete))
+            }
+
             arkClient, err := f.Client()
             cmd.CheckError(err)

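The delete command now refuses to run against clusters older than controller.MinVersionForDelete (1.7.5, defined later in this PR), because finalizer-based deletion needs server-side support. Below is a minimal, self-contained sketch of how that version gate behaves, using the same k8s.io/kubernetes/pkg/util/version helpers this PR imports; the 1.8.2 server value is only an illustrative assumption, not something read from a real cluster.

```
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/version"
)

func main() {
	// Same minimum the PR enforces for `ark ... delete` and server-side GC.
	minVersion := version.MustParseSemantic("1.7.5")

	// Hypothetical server version, for illustration only.
	server := version.MustParseSemantic("1.8.2")

	// AtLeast reports whether the server meets the minimum; the CLI calls
	// cmd.CheckError with an error when this returns false.
	fmt.Println(server.AtLeast(minVersion)) // true
}
```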
@@ -0,0 +1,51 @@
+/*
+Copyright 2017 the Heptio Ark contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package delete
+
+import (
+    "github.com/spf13/cobra"
+
+    "github.com/heptio/ark/pkg/client"
+    "github.com/heptio/ark/pkg/cmd/cli/backup"
+    "github.com/heptio/ark/pkg/cmd/cli/restore"
+    "github.com/heptio/ark/pkg/cmd/cli/schedule"
+)
+
+func NewCommand(f client.Factory) *cobra.Command {
+    c := &cobra.Command{
+        Use:   "delete",
+        Short: "Delete ark resources",
+        Long:  "Delete ark resources",
+    }
+
+    backupCommand := backup.NewDeleteCommand(f, "backup")
+    backupCommand.Aliases = []string{"backups"}
+
+    restoreCommand := restore.NewDeleteCommand(f, "restore")
+    restoreCommand.Aliases = []string{"restores"}
+
+    scheduleCommand := schedule.NewDeleteCommand(f, "schedule")
+    scheduleCommand.Aliases = []string{"schedules"}
+
+    c.AddCommand(
+        backupCommand,
+        restoreCommand,
+        scheduleCommand,
+    )
+
+    return c
+}
@@ -27,9 +27,9 @@ import (
     "github.com/heptio/ark/pkg/cmd"
 )

-func NewDeleteCommand(f client.Factory) *cobra.Command {
+func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
     c := &cobra.Command{
-        Use:   "delete NAME",
+        Use:   fmt.Sprintf("%s NAME", use),
         Short: "Delete a restore",
         Run: func(c *cobra.Command, args []string) {
             if len(args) != 1 {
@@ -34,7 +34,7 @@ func NewCommand(f client.Factory) *cobra.Command {
         NewGetCommand(f, "get"),
         NewLogsCommand(f),
         NewDescribeCommand(f, "describe"),
-        NewDeleteCommand(f),
+        NewDeleteCommand(f, "delete"),
     )

     return c
@@ -27,9 +27,9 @@ import (
     "github.com/heptio/ark/pkg/cmd"
 )

-func NewDeleteCommand(f client.Factory) *cobra.Command {
+func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
     c := &cobra.Command{
-        Use:   "delete NAME",
+        Use:   fmt.Sprintf("%s NAME", use),
         Short: "Delete a schedule",
         Run: func(c *cobra.Command, args []string) {
             if len(args) != 1 {
@@ -33,7 +33,7 @@ func NewCommand(f client.Factory) *cobra.Command {
         NewCreateCommand(f, "create"),
         NewGetCommand(f, "get"),
         NewDescribeCommand(f, "describe"),
-        NewDeleteCommand(f),
+        NewDeleteCommand(f, "delete"),
     )

     return c
@@ -55,6 +55,7 @@ import (
     "github.com/heptio/ark/pkg/plugin"
     "github.com/heptio/ark/pkg/restore"
     "github.com/heptio/ark/pkg/util/kube"
+    kubeutil "github.com/heptio/ark/pkg/util/kube"
     "github.com/heptio/ark/pkg/util/logging"
 )

@@ -470,22 +471,31 @@ func (s *server) runControllers(config *api.Config) error {
             wg.Done()
         }()

-        gcController := controller.NewGCController(
-            s.backupService,
-            s.snapshotService,
-            config.BackupStorageProvider.Bucket,
-            config.GCSyncPeriod.Duration,
-            s.sharedInformerFactory.Ark().V1().Backups(),
-            s.arkClient.ArkV1(),
-            s.sharedInformerFactory.Ark().V1().Restores(),
-            s.arkClient.ArkV1(),
-            s.logger,
-        )
-        wg.Add(1)
-        go func() {
-            gcController.Run(ctx, 1)
-            wg.Done()
-        }()
+        serverVersion, err := kubeutil.ServerVersion(s.kubeClient.Discovery())
+        if err != nil {
+            return err
+        }
+
+        if !serverVersion.AtLeast(controller.MinVersionForDelete) {
+            s.logger.Errorf("Garbage-collection is disabled because it requires the Kubernetes server version to be at least %s", controller.MinVersionForDelete)
+        } else {
+            gcController := controller.NewGCController(
+                s.backupService,
+                s.snapshotService,
+                config.BackupStorageProvider.Bucket,
+                config.GCSyncPeriod.Duration,
+                s.sharedInformerFactory.Ark().V1().Backups(),
+                s.arkClient.ArkV1(),
+                s.sharedInformerFactory.Ark().V1().Restores(),
+                s.arkClient.ArkV1(),
+                s.logger,
+            )
+            wg.Add(1)
+            go func() {
+                gcController.Run(ctx, 1)
+                wg.Done()
+            }()
+        }
     }

     restorer, err := newRestorer(
@@ -234,6 +234,11 @@ func (controller *backupController) processBackup(key string) error {
     // set backup version
     backup.Status.Version = backupVersion

+    // add GC finalizer if it's not there already
+    if !has(backup.Finalizers, gcFinalizer) {
+        backup.Finalizers = append(backup.Finalizers, gcFinalizer)
+    }
+
     // calculate expiration
     if backup.Spec.TTL.Duration > 0 {
         backup.Status.Expiration = metav1.NewTime(controller.clock.Now().Add(backup.Spec.TTL.Duration))
@@ -190,6 +190,7 @@ func TestProcessBackup(t *testing.T) {
             backup.Status.Phase = v1.BackupPhaseInProgress
             backup.Status.Expiration.Time = expiration
             backup.Status.Version = 1
+            backup.Finalizers = []string{gcFinalizer}
             backupper.On("Backup", backup, mock.Anything, mock.Anything, mock.Anything).Return(nil)

             cloudBackups.On("UploadBackup", "bucket", backup.Name, mock.Anything, mock.Anything, mock.Anything).Return(nil)
@@ -225,6 +226,7 @@ func TestProcessBackup(t *testing.T) {
                 res.Status.Version = 1
                 res.Status.Expiration.Time = expiration
                 res.Status.Phase = v1.BackupPhase(phase)
+                res.Finalizers = []string{gcFinalizer}

                 return true, res, nil
             })
@@ -247,14 +249,15 @@ func TestProcessBackup(t *testing.T) {
             actions := client.Actions()
             require.Equal(t, 2, len(actions))

-            // validate Patch call 1 (setting version, expiration, and phase)
+            // validate Patch call 1 (setting finalizer, version, expiration, and phase)
             patchAction, ok := actions[0].(core.PatchAction)
             require.True(t, ok, "action is not a PatchAction")

             patch := make(map[string]interface{})
             require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")

-            assert.Equal(t, 1, len(patch), "patch has wrong number of keys")
+            // should have metadata and status
+            assert.Equal(t, 2, len(patch), "patch has wrong number of keys")

             expectedStatusKeys := 2
             if test.backup.Spec.TTL.Duration > 0 {
@@ -268,10 +271,19 @@ func TestProcessBackup(t *testing.T) {
             res, _ := collections.GetMap(patch, "status")
             assert.Equal(t, expectedStatusKeys, len(res), "patch's status has the wrong number of keys")

+            finalizers, err := collections.GetSlice(patch, "metadata.finalizers")
+            require.NoError(t, err, "patch does not contain metadata.finalizers")
+            assert.Equal(t, 1, len(finalizers))
+            assert.Equal(t, gcFinalizer, finalizers[0])
+
+            res, _ = collections.GetMap(patch, "metadata")
+            assert.Equal(t, 1, len(res), "patch's metadata has the wrong number of keys")
+
             // validate Patch call 2 (setting phase)
             patchAction, ok = actions[1].(core.PatchAction)
             require.True(t, ok, "action is not a PatchAction")

+            patch = make(map[string]interface{})
             require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")

             assert.Equal(t, 1, len(patch), "patch has wrong number of keys")
@@ -18,6 +18,7 @@ package controller

 import (
     "context"
+    "encoding/json"
     "time"

     "github.com/pkg/errors"
@@ -25,9 +26,12 @@ import (

     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/clock"
+    kerrors "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/tools/cache"
+    "k8s.io/kubernetes/pkg/util/version"

     api "github.com/heptio/ark/pkg/apis/ark/v1"
     "github.com/heptio/ark/pkg/cloudprovider"
@@ -37,6 +41,15 @@ import (
     "github.com/heptio/ark/pkg/util/kube"
 )

+const gcFinalizer = "gc.ark.heptio.com"
+
+// MinVersionForDelete is the minimum Kubernetes server version that Ark
+// requires in order to be able to properly delete backups (including
+// the associated snapshots and object storage files). This is because
+// Ark uses finalizers on the backup CRD to implement garbage-collection
+// and deletion.
+var MinVersionForDelete = version.MustParseSemantic("1.7.5")
+
 // gcController removes expired backup content from object storage.
 type gcController struct {
     backupService cloudprovider.BackupService
@@ -70,7 +83,7 @@ func NewGCController(
         syncPeriod = time.Minute
     }

-    return &gcController{
+    c := &gcController{
         backupService:   backupService,
         snapshotService: snapshotService,
         bucket:          bucket,
@@ -84,9 +97,87 @@ func NewGCController(
         restoreClient:   restoreClient,
         logger:          logger,
     }
+
+    backupInformer.Informer().AddEventHandler(
+        cache.ResourceEventHandlerFuncs{
+            UpdateFunc: c.handleFinalizer,
+        },
+    )
+
+    return c
 }

-var _ Interface = &gcController{}
+// handleFinalizer runs garbage-collection on a backup that has the Ark GC
+// finalizer and a deletionTimestamp.
+func (c *gcController) handleFinalizer(_, newObj interface{}) {
+    var (
+        backup = newObj.(*api.Backup)
+        log    = c.logger.WithField("backup", kube.NamespaceAndName(backup))
+    )
+
+    // we're only interested in backups that have a deletionTimestamp and at
+    // least one finalizer.
+    if backup.DeletionTimestamp == nil || len(backup.Finalizers) == 0 {
+        return
+    }
+    log.Debugf("Backup has finalizers %s", backup.Finalizers)
+
+    if !has(backup.Finalizers, gcFinalizer) {
+        return
+    }
+
+    log.Infof("Garbage-collecting backup")
+    if err := c.garbageCollect(backup, log); err != nil {
+        // if there were errors deleting related cloud resources, don't
+        // delete the backup API object because we don't want to orphan
+        // the cloud resources.
+        log.WithError(err).Error("Error deleting backup's related objects")
+        return
+    }
+
+    patchMap := map[string]interface{}{
+        "metadata": map[string]interface{}{
+            "finalizers":      except(backup.Finalizers, gcFinalizer),
+            "resourceVersion": backup.ResourceVersion,
+        },
+    }
+
+    patchBytes, err := json.Marshal(patchMap)
+    if err != nil {
+        log.WithError(err).Error("Error marshaling finalizers patch")
+        return
+    }
+
+    if _, err = c.backupClient.Backups(backup.Namespace).Patch(backup.Name, types.MergePatchType, patchBytes); err != nil {
+        log.WithError(errors.WithStack(err)).Error("Error patching backup")
+    }
+}
+
+// has returns true if the `items` slice contains the
+// value `val`, or false otherwise.
+func has(items []string, val string) bool {
+    for _, itm := range items {
+        if itm == val {
+            return true
+        }
+    }
+
+    return false
+}
+
+// except returns a new string slice that contains all of the entries
+// from `items` except `val`.
+func except(items []string, val string) []string {
+    var newItems []string
+
+    for _, itm := range items {
+        if itm != val {
+            newItems = append(newItems, itm)
+        }
+    }
+
+    return newItems
+}

 // Run is a blocking function that runs a single worker to garbage-collect backups
 // from object/block storage and the Ark API. It will return when it receives on the
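Once garbageCollect succeeds, handleFinalizer strips the GC finalizer with a JSON merge patch instead of a full update. Here is a self-contained sketch of the patch body it builds; the finalizer list and resourceVersion are example values only, and the printed output matches the expectedPatch used in the tests later in this PR.

```
package main

import (
	"encoding/json"
	"fmt"
)

// except mirrors the helper added in this PR: return items without val.
func except(items []string, val string) []string {
	var out []string
	for _, itm := range items {
		if itm != val {
			out = append(out, itm)
		}
	}
	return out
}

func main() {
	// Example backup metadata: the GC finalizer plus one unrelated finalizer.
	finalizers := []string{"gc.ark.heptio.com", "foo"}

	// Same shape as the merge patch handleFinalizer sends to the API server.
	patchMap := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      except(finalizers, "gc.ark.heptio.com"),
			"resourceVersion": "1",
		},
	}

	patchBytes, _ := json.Marshal(patchMap)
	fmt.Println(string(patchBytes))
	// {"metadata":{"finalizers":["foo"],"resourceVersion":"1"}}
}
```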
@ -103,104 +194,74 @@ func (c *gcController) Run(ctx context.Context, workers int) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *gcController) run() {
|
func (c *gcController) run() {
|
||||||
c.processBackups()
|
now := c.clock.Now()
|
||||||
}
|
c.logger.Info("Garbage-collecting expired backups")
|
||||||
|
|
||||||
// garbageCollectBackup removes an expired backup by deleting any associated backup files (if
|
// Go thru API objects and delete expired ones (finalizer will GC their
|
||||||
// deleteBackupFiles = true), volume snapshots, restore API objects, and the backup API object
|
// corresponding files/snapshots/restores). Note that we're ignoring backups
|
||||||
// itself.
|
// in object storage that haven't been synced to Kubernetes yet; they'll
|
||||||
func (c *gcController) garbageCollectBackup(backup *api.Backup, deleteBackupFiles bool) {
|
// be processed for GC (if applicable) once they've been synced.
|
||||||
logContext := c.logger.WithField("backup", kube.NamespaceAndName(backup))
|
backups, err := c.backupLister.List(labels.Everything())
|
||||||
|
if err != nil {
|
||||||
// if the backup includes snapshots but we don't currently have a PVProvider, we don't
|
c.logger.WithError(errors.WithStack(err)).Error("Error getting all backups")
|
||||||
// want to orphan the snapshots so skip garbage-collection entirely.
|
|
||||||
if c.snapshotService == nil && len(backup.Status.VolumeBackups) > 0 {
|
|
||||||
logContext.Warning("Cannot garbage-collect backup because backup includes snapshots and server is not configured with PersistentVolumeProvider")
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// The GC process is primarily intended to delete expired cloud resources (file in object
|
for _, backup := range backups {
|
||||||
// storage, snapshots). If we fail to delete any of these, we don't delete the Backup API
|
log := c.logger.WithField("backup", kube.NamespaceAndName(backup))
|
||||||
// object or metadata file in object storage so that we don't orphan the cloud resources.
|
if backup.Status.Expiration.Time.After(now) {
|
||||||
deletionFailure := false
|
log.Debug("Backup has not expired yet, skipping")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// since backups have a finalizer, this will actually have the effect of setting a deletionTimestamp and calling
|
||||||
|
// an update. The update will be handled by this controller and will result in a deletion of the obj storage
|
||||||
|
// files and the API object.
|
||||||
|
if err := c.backupClient.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
|
||||||
|
log.WithError(errors.WithStack(err)).Error("Error deleting backup")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// garbageCollect prepares for deleting an expired backup by deleting any
|
||||||
|
// associated backup files, volume snapshots, or restore API objects.
|
||||||
|
func (c *gcController) garbageCollect(backup *api.Backup, log logrus.FieldLogger) error {
|
||||||
|
// if the backup includes snapshots but we don't currently have a PVProvider, we don't
|
||||||
|
// want to orphan the snapshots so skip garbage-collection entirely.
|
||||||
|
if c.snapshotService == nil && len(backup.Status.VolumeBackups) > 0 {
|
||||||
|
return errors.New("cannot garbage-collect backup because it includes snapshots and Ark is not configured with a PersistentVolumeProvider")
|
||||||
|
}
|
||||||
|
|
||||||
|
var errs []error
|
||||||
|
|
||||||
for _, volumeBackup := range backup.Status.VolumeBackups {
|
for _, volumeBackup := range backup.Status.VolumeBackups {
|
||||||
logContext.WithField("snapshotID", volumeBackup.SnapshotID).Info("Removing snapshot associated with backup")
|
log.WithField("snapshotID", volumeBackup.SnapshotID).Info("Removing snapshot associated with backup")
|
||||||
if err := c.snapshotService.DeleteSnapshot(volumeBackup.SnapshotID); err != nil {
|
if err := c.snapshotService.DeleteSnapshot(volumeBackup.SnapshotID); err != nil {
|
||||||
logContext.WithError(err).WithField("snapshotID", volumeBackup.SnapshotID).Error("Error deleting snapshot")
|
errs = append(errs, errors.Wrapf(err, "error deleting snapshot %s", volumeBackup.SnapshotID))
|
||||||
deletionFailure = true
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If applicable, delete everything in the backup dir in object storage *before* deleting the API object
|
log.Info("Removing backup from object storage")
|
||||||
// because otherwise the backup sync controller could re-sync the backup from object storage.
|
if err := c.backupService.DeleteBackupDir(c.bucket, backup.Name); err != nil {
|
||||||
if deleteBackupFiles {
|
errs = append(errs, errors.Wrap(err, "error deleting backup from object storage"))
|
||||||
logContext.Info("Removing backup from object storage")
|
|
||||||
if err := c.backupService.DeleteBackupDir(c.bucket, backup.Name); err != nil {
|
|
||||||
logContext.WithError(err).Error("Error deleting backup")
|
|
||||||
deletionFailure = true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
logContext.Info("Getting restore API objects referencing backup")
|
|
||||||
if restores, err := c.restoreLister.Restores(backup.Namespace).List(labels.Everything()); err != nil {
|
if restores, err := c.restoreLister.Restores(backup.Namespace).List(labels.Everything()); err != nil {
|
||||||
logContext.WithError(errors.WithStack(err)).Error("Error getting Restore API objects")
|
log.WithError(errors.WithStack(err)).Error("Error listing restore API objects")
|
||||||
} else {
|
} else {
|
||||||
for _, restore := range restores {
|
for _, restore := range restores {
|
||||||
if restore.Spec.BackupName == backup.Name {
|
if restore.Spec.BackupName != backup.Name {
|
||||||
logContext.WithField("restore", kube.NamespaceAndName(restore)).Info("Removing Restore API object referencing Backup")
|
continue
|
||||||
if err := c.restoreClient.Restores(restore.Namespace).Delete(restore.Name, &metav1.DeleteOptions{}); err != nil {
|
}
|
||||||
logContext.WithError(errors.WithStack(err)).WithField("restore", kube.NamespaceAndName(restore)).
|
|
||||||
Error("Error deleting Restore API object")
|
restoreLog := log.WithField("restore", kube.NamespaceAndName(restore))
|
||||||
}
|
|
||||||
|
restoreLog.Info("Deleting restore referencing backup")
|
||||||
|
if err := c.restoreClient.Restores(restore.Namespace).Delete(restore.Name, &metav1.DeleteOptions{}); err != nil {
|
||||||
|
restoreLog.WithError(errors.WithStack(err)).Error("Error deleting restore")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if deletionFailure {
|
return kerrors.NewAggregate(errs)
|
||||||
logContext.Warning("Backup will not be deleted due to errors deleting related object storage files(s) and/or volume snapshots")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
logContext.Info("Removing Backup API object")
|
|
||||||
if err := c.backupClient.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
|
|
||||||
logContext.WithError(errors.WithStack(err)).Error("Error deleting Backup API object")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// garbageCollectBackups checks backups for expiration and triggers garbage-collection for the expired
|
|
||||||
// ones.
|
|
||||||
func (c *gcController) garbageCollectBackups(backups []*api.Backup, expiration time.Time, deleteBackupFiles bool) {
|
|
||||||
for _, backup := range backups {
|
|
||||||
if backup.Status.Expiration.Time.After(expiration) {
|
|
||||||
c.logger.WithField("backup", kube.NamespaceAndName(backup)).Info("Backup has not expired yet, skipping")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
c.garbageCollectBackup(backup, deleteBackupFiles)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// processBackups gets backups from object storage and the API and submits
|
|
||||||
// them for garbage-collection.
|
|
||||||
func (c *gcController) processBackups() {
|
|
||||||
now := c.clock.Now()
|
|
||||||
c.logger.WithField("now", now).Info("Garbage-collecting backups that have expired as of now")
|
|
||||||
|
|
||||||
// GC backups in object storage. We do this in addition
|
|
||||||
// to GC'ing API objects to prevent orphan backup files.
|
|
||||||
backups, err := c.backupService.GetAllBackups(c.bucket)
|
|
||||||
if err != nil {
|
|
||||||
c.logger.WithError(err).Error("Error getting all backups from object storage")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.garbageCollectBackups(backups, now, true)
|
|
||||||
|
|
||||||
// GC backups without files in object storage
|
|
||||||
apiBackups, err := c.backupLister.List(labels.Everything())
|
|
||||||
if err != nil {
|
|
||||||
c.logger.WithError(errors.WithStack(err)).Error("Error getting all Backup API objects")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.garbageCollectBackups(apiBackups, now, false)
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,38 +17,37 @@ limitations under the License.
|
||||||
package controller
|
package controller
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/util/clock"
|
"k8s.io/apimachinery/pkg/util/clock"
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
core "k8s.io/client-go/testing"
|
core "k8s.io/client-go/testing"
|
||||||
|
|
||||||
api "github.com/heptio/ark/pkg/apis/ark/v1"
|
api "github.com/heptio/ark/pkg/apis/ark/v1"
|
||||||
"github.com/heptio/ark/pkg/cloudprovider"
|
|
||||||
"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
|
"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
|
||||||
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
|
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
|
||||||
arktest "github.com/heptio/ark/pkg/util/test"
|
arktest "github.com/heptio/ark/pkg/util/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
type gcTest struct {
|
func TestGCControllerRun(t *testing.T) {
|
||||||
name string
|
|
||||||
backups []*api.Backup
|
|
||||||
snapshots sets.String
|
|
||||||
nilSnapshotService bool
|
|
||||||
|
|
||||||
expectedDeletions sets.String
|
|
||||||
expectedSnapshotsRemaining sets.String
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGarbageCollect(t *testing.T) {
|
|
||||||
fakeClock := clock.NewFakeClock(time.Now())
|
fakeClock := clock.NewFakeClock(time.Now())
|
||||||
|
|
||||||
tests := []gcTest{
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
backups []*api.Backup
|
||||||
|
snapshots sets.String
|
||||||
|
expectedDeletions sets.String
|
||||||
|
}{
|
||||||
{
|
{
|
||||||
name: "basic-expired",
|
name: "no backups results in no deletions",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "expired backup is deleted",
|
||||||
backups: []*api.Backup{
|
backups: []*api.Backup{
|
||||||
arktest.NewTestBackup().WithName("backup-1").
|
arktest.NewTestBackup().WithName("backup-1").
|
||||||
WithExpiration(fakeClock.Now().Add(-1*time.Second)).
|
WithExpiration(fakeClock.Now().Add(-1*time.Second)).
|
||||||
|
@ -56,12 +55,10 @@ func TestGarbageCollect(t *testing.T) {
|
||||||
WithSnapshot("pv-2", "snapshot-2").
|
WithSnapshot("pv-2", "snapshot-2").
|
||||||
Backup,
|
Backup,
|
||||||
},
|
},
|
||||||
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
|
expectedDeletions: sets.NewString("backup-1"),
|
||||||
expectedDeletions: sets.NewString("backup-1"),
|
|
||||||
expectedSnapshotsRemaining: sets.NewString(),
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "basic-unexpired",
|
name: "unexpired backup is not deleted",
|
||||||
backups: []*api.Backup{
|
backups: []*api.Backup{
|
||||||
arktest.NewTestBackup().WithName("backup-1").
|
arktest.NewTestBackup().WithName("backup-1").
|
||||||
WithExpiration(fakeClock.Now().Add(1*time.Minute)).
|
WithExpiration(fakeClock.Now().Add(1*time.Minute)).
|
||||||
|
@ -69,12 +66,10 @@ func TestGarbageCollect(t *testing.T) {
|
||||||
WithSnapshot("pv-2", "snapshot-2").
|
WithSnapshot("pv-2", "snapshot-2").
|
||||||
Backup,
|
Backup,
|
||||||
},
|
},
|
||||||
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
|
expectedDeletions: sets.NewString(),
|
||||||
expectedDeletions: sets.NewString(),
|
|
||||||
expectedSnapshotsRemaining: sets.NewString("snapshot-1", "snapshot-2"),
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "one expired, one unexpired",
|
name: "expired backup is deleted and unexpired backup is not deleted",
|
||||||
backups: []*api.Backup{
|
backups: []*api.Backup{
|
||||||
arktest.NewTestBackup().WithName("backup-1").
|
arktest.NewTestBackup().WithName("backup-1").
|
||||||
WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
|
WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
|
||||||
|
@ -87,144 +82,91 @@ func TestGarbageCollect(t *testing.T) {
|
||||||
WithSnapshot("pv-4", "snapshot-4").
|
WithSnapshot("pv-4", "snapshot-4").
|
||||||
Backup,
|
Backup,
|
||||||
},
|
},
|
||||||
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
|
expectedDeletions: sets.NewString("backup-1"),
|
||||||
expectedDeletions: sets.NewString("backup-1"),
|
|
||||||
expectedSnapshotsRemaining: sets.NewString("snapshot-3", "snapshot-4"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "none expired in target bucket",
|
|
||||||
backups: []*api.Backup{
|
|
||||||
arktest.NewTestBackup().WithName("backup-2").
|
|
||||||
WithExpiration(fakeClock.Now().Add(1*time.Minute)).
|
|
||||||
WithSnapshot("pv-3", "snapshot-3").
|
|
||||||
WithSnapshot("pv-4", "snapshot-4").
|
|
||||||
Backup,
|
|
||||||
},
|
|
||||||
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
|
|
||||||
expectedDeletions: sets.NewString(),
|
|
||||||
expectedSnapshotsRemaining: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "orphan snapshots",
|
|
||||||
backups: []*api.Backup{
|
|
||||||
arktest.NewTestBackup().WithName("backup-1").
|
|
||||||
WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
|
|
||||||
WithSnapshot("pv-1", "snapshot-1").
|
|
||||||
WithSnapshot("pv-2", "snapshot-2").
|
|
||||||
Backup,
|
|
||||||
},
|
|
||||||
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
|
|
||||||
expectedDeletions: sets.NewString("backup-1"),
|
|
||||||
expectedSnapshotsRemaining: sets.NewString("snapshot-3", "snapshot-4"),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "no snapshot service only GC's backups without snapshots",
|
|
||||||
backups: []*api.Backup{
|
|
||||||
arktest.NewTestBackup().WithName("backup-1").
|
|
||||||
WithExpiration(fakeClock.Now().Add(-1*time.Second)).
|
|
||||||
WithSnapshot("pv-1", "snapshot-1").
|
|
||||||
WithSnapshot("pv-2", "snapshot-2").
|
|
||||||
Backup,
|
|
||||||
arktest.NewTestBackup().WithName("backup-2").
|
|
||||||
WithExpiration(fakeClock.Now().Add(-1 * time.Second)).
|
|
||||||
Backup,
|
|
||||||
},
|
|
||||||
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
|
|
||||||
nilSnapshotService: true,
|
|
||||||
expectedDeletions: sets.NewString("backup-2"),
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
var (
|
|
||||||
backupService = &arktest.BackupService{}
|
|
||||||
snapshotService *arktest.FakeSnapshotService
|
|
||||||
)
|
|
||||||
|
|
||||||
if !test.nilSnapshotService {
|
|
||||||
snapshotService = &arktest.FakeSnapshotService{SnapshotsTaken: test.snapshots}
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run(test.name, func(t *testing.T) {
|
t.Run(test.name, func(t *testing.T) {
|
||||||
var (
|
var (
|
||||||
client = fake.NewSimpleClientset()
|
client = fake.NewSimpleClientset()
|
||||||
sharedInformers = informers.NewSharedInformerFactory(client, 0)
|
sharedInformers = informers.NewSharedInformerFactory(client, 0)
|
||||||
snapSvc cloudprovider.SnapshotService
|
|
||||||
bucket = "bucket"
|
|
||||||
logger = arktest.NewLogger()
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if snapshotService != nil {
|
|
||||||
snapSvc = snapshotService
|
|
||||||
}
|
|
||||||
|
|
||||||
controller := NewGCController(
|
controller := NewGCController(
|
||||||
backupService,
|
nil,
|
||||||
snapSvc,
|
nil,
|
||||||
bucket,
|
"bucket",
|
||||||
1*time.Millisecond,
|
1*time.Millisecond,
|
||||||
sharedInformers.Ark().V1().Backups(),
|
sharedInformers.Ark().V1().Backups(),
|
||||||
client.ArkV1(),
|
client.ArkV1(),
|
||||||
sharedInformers.Ark().V1().Restores(),
|
sharedInformers.Ark().V1().Restores(),
|
||||||
client.ArkV1(),
|
client.ArkV1(),
|
||||||
logger,
|
arktest.NewLogger(),
|
||||||
).(*gcController)
|
).(*gcController)
|
||||||
controller.clock = fakeClock
|
controller.clock = fakeClock
|
||||||
|
|
||||||
backupService.On("GetAllBackups", bucket).Return(test.backups, nil)
|
for _, backup := range test.backups {
|
||||||
for _, b := range test.expectedDeletions.List() {
|
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(backup)
|
||||||
backupService.On("DeleteBackupDir", bucket, b).Return(nil)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
controller.processBackups()
|
expectedDeletions := make([]core.Action, 0, len(test.expectedDeletions))
|
||||||
|
for backup := range test.expectedDeletions {
|
||||||
if !test.nilSnapshotService {
|
expectedDeletions = append(expectedDeletions, core.NewDeleteAction(
|
||||||
assert.Equal(t, test.expectedSnapshotsRemaining, snapshotService.SnapshotsTaken)
|
api.SchemeGroupVersion.WithResource("backups"),
|
||||||
|
api.DefaultNamespace,
|
||||||
|
backup,
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
backupService.AssertExpectations(t)
|
controller.run()
|
||||||
|
|
||||||
|
assert.Equal(t, expectedDeletions, client.Actions())
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGarbageCollectBackup(t *testing.T) {
|
func TestGarbageCollectBackup(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
backup *api.Backup
|
backup *api.Backup
|
||||||
deleteBackupFile bool
|
snapshots sets.String
|
||||||
snapshots sets.String
|
restores []*api.Restore
|
||||||
backupFiles sets.String
|
nilSnapshotService bool
|
||||||
backupMetadataFiles sets.String
|
expectErr bool
|
||||||
restores []*api.Restore
|
expectBackupDirDeleted bool
|
||||||
expectedRestoreDeletes []string
|
|
||||||
expectedBackupDelete string
|
|
||||||
expectedSnapshots sets.String
|
|
||||||
expectedObjectStorageDeletions sets.String
|
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "deleteBackupFile=false, snapshot deletion fails, don't delete kube backup",
|
name: "nil snapshot service when backup has snapshots returns error",
|
||||||
|
backup: arktest.NewTestBackup().WithName("backup-1").WithSnapshot("pv-1", "snap-1").Backup,
|
||||||
|
nilSnapshotService: true,
|
||||||
|
expectErr: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "nil snapshot service when backup doesn't have snapshots correctly garbage-collects",
|
||||||
|
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||||
|
nilSnapshotService: true,
|
||||||
|
expectBackupDirDeleted: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "return error if snapshot deletion fails",
|
||||||
backup: arktest.NewTestBackup().WithName("backup-1").
|
backup: arktest.NewTestBackup().WithName("backup-1").
|
||||||
WithSnapshot("pv-1", "snapshot-1").
|
WithSnapshot("pv-1", "snapshot-1").
|
||||||
WithSnapshot("pv-2", "snapshot-2").
|
WithSnapshot("pv-2", "snapshot-2").
|
||||||
Backup,
|
Backup,
|
||||||
deleteBackupFile: false,
|
snapshots: sets.NewString("snapshot-1"),
|
||||||
snapshots: sets.NewString("snapshot-1"),
|
expectBackupDirDeleted: true,
|
||||||
expectedSnapshots: sets.NewString(),
|
expectErr: true,
|
||||||
expectedObjectStorageDeletions: sets.NewString(),
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "related restores should be deleted",
|
name: "related restores should be deleted",
|
||||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||||
deleteBackupFile: true,
|
snapshots: sets.NewString(),
|
||||||
snapshots: sets.NewString(),
|
|
||||||
restores: []*api.Restore{
|
restores: []*api.Restore{
|
||||||
arktest.NewTestRestore(api.DefaultNamespace, "restore-1", api.RestorePhaseCompleted).WithBackup("backup-1").Restore,
|
arktest.NewTestRestore(api.DefaultNamespace, "restore-1", api.RestorePhaseCompleted).WithBackup("backup-1").Restore,
|
||||||
arktest.NewTestRestore(api.DefaultNamespace, "restore-2", api.RestorePhaseCompleted).WithBackup("backup-2").Restore,
|
arktest.NewTestRestore(api.DefaultNamespace, "restore-2", api.RestorePhaseCompleted).WithBackup("backup-2").Restore,
|
||||||
},
|
},
|
||||||
expectedRestoreDeletes: []string{"restore-1"},
|
expectBackupDirDeleted: true,
|
||||||
expectedBackupDelete: "backup-1",
|
|
||||||
expectedSnapshots: sets.NewString(),
|
|
||||||
expectedObjectStorageDeletions: sets.NewString("backup-1"),
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -235,61 +177,67 @@ func TestGarbageCollectBackup(t *testing.T) {
|
||||||
snapshotService = &arktest.FakeSnapshotService{SnapshotsTaken: test.snapshots}
|
snapshotService = &arktest.FakeSnapshotService{SnapshotsTaken: test.snapshots}
|
||||||
client = fake.NewSimpleClientset()
|
client = fake.NewSimpleClientset()
|
||||||
sharedInformers = informers.NewSharedInformerFactory(client, 0)
|
sharedInformers = informers.NewSharedInformerFactory(client, 0)
|
||||||
bucket = "bucket-1"
|
|
||||||
logger = arktest.NewLogger()
|
|
||||||
controller = NewGCController(
|
controller = NewGCController(
|
||||||
backupService,
|
backupService,
|
||||||
snapshotService,
|
snapshotService,
|
||||||
bucket,
|
"bucket-1",
|
||||||
1*time.Millisecond,
|
1*time.Millisecond,
|
||||||
sharedInformers.Ark().V1().Backups(),
|
sharedInformers.Ark().V1().Backups(),
|
||||||
client.ArkV1(),
|
client.ArkV1(),
|
||||||
sharedInformers.Ark().V1().Restores(),
|
sharedInformers.Ark().V1().Restores(),
|
||||||
client.ArkV1(),
|
client.ArkV1(),
|
||||||
logger,
|
arktest.NewLogger(),
|
||||||
).(*gcController)
|
).(*gcController)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if test.nilSnapshotService {
|
||||||
|
controller.snapshotService = nil
|
||||||
|
}
|
||||||
|
|
||||||
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup)
|
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup)
|
||||||
for _, restore := range test.restores {
|
for _, restore := range test.restores {
|
||||||
sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore)
|
sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, b := range test.expectedObjectStorageDeletions.List() {
|
if test.expectBackupDirDeleted {
|
||||||
backupService.On("DeleteBackupDir", bucket, b).Return(nil)
|
backupService.On("DeleteBackupDir", controller.bucket, test.backup.Name).Return(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// METHOD UNDER TEST
|
// METHOD UNDER TEST
|
||||||
controller.garbageCollectBackup(test.backup, test.deleteBackupFile)
|
err := controller.garbageCollect(test.backup, controller.logger)
|
||||||
|
|
||||||
// VERIFY:
|
// VERIFY:
|
||||||
|
|
||||||
// remaining snapshots
|
// error
|
||||||
assert.Equal(t, test.expectedSnapshots, snapshotService.SnapshotsTaken)
|
assert.Equal(t, test.expectErr, err != nil)
|
||||||
|
|
||||||
|
// remaining snapshots
|
||||||
|
if !test.nilSnapshotService {
|
||||||
|
backupSnapshots := sets.NewString()
|
||||||
|
for _, snapshot := range test.backup.Status.VolumeBackups {
|
||||||
|
backupSnapshots.Insert(snapshot.SnapshotID)
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, test.snapshots.Difference(backupSnapshots), snapshotService.SnapshotsTaken)
|
||||||
|
}
|
||||||
|
|
||||||
|
// restore client deletes
|
||||||
expectedActions := make([]core.Action, 0)
|
expectedActions := make([]core.Action, 0)
|
||||||
// Restore client deletes
|
for _, restore := range test.restores {
|
||||||
for _, restore := range test.expectedRestoreDeletes {
|
if restore.Spec.BackupName != test.backup.Name {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
action := core.NewDeleteAction(
|
action := core.NewDeleteAction(
|
||||||
api.SchemeGroupVersion.WithResource("restores"),
|
api.SchemeGroupVersion.WithResource("restores"),
|
||||||
api.DefaultNamespace,
|
api.DefaultNamespace,
|
||||||
restore,
|
restore.Name,
|
||||||
)
|
)
|
||||||
expectedActions = append(expectedActions, action)
|
expectedActions = append(expectedActions, action)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Backup client deletes
|
|
||||||
if test.expectedBackupDelete != "" {
|
|
||||||
action := core.NewDeleteAction(
|
|
||||||
api.SchemeGroupVersion.WithResource("backups"),
|
|
||||||
api.DefaultNamespace,
|
|
||||||
test.expectedBackupDelete,
|
|
||||||
)
|
|
||||||
expectedActions = append(expectedActions, action)
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, expectedActions, client.Actions())
|
assert.Equal(t, expectedActions, client.Actions())
|
||||||
|
|
||||||
|
// backup dir deletion
|
||||||
backupService.AssertExpectations(t)
|
backupService.AssertExpectations(t)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@@ -297,59 +245,126 @@ func TestGarbageCollectBackup(t *testing.T) {

func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
	var (
		backupService   = &arktest.BackupService{}
		snapshotService = &arktest.FakeSnapshotService{}
		fakeClock       = clock.NewFakeClock(time.Now())
		assert          = assert.New(t)
		client          = fake.NewSimpleClientset()
	)
		sharedInformers = informers.NewSharedInformerFactory(client, 0)
		backup          = arktest.NewTestBackup().WithName("backup-1").
	scenario := gcTest{
		name: "basic-expired",
		backups: []*api.Backup{
			arktest.NewTestBackup().WithName("backup-1").
				WithExpiration(fakeClock.Now().Add(1*time.Second)).
				WithSnapshot("pv-1", "snapshot-1").
				WithSnapshot("pv-2", "snapshot-2").
				Backup,
				Backup
		},
		snapshots: sets.NewString("snapshot-1", "snapshot-2"),
	}

	snapshotService.SnapshotsTaken = scenario.snapshots

	var (
		client          = fake.NewSimpleClientset()
		sharedInformers = informers.NewSharedInformerFactory(client, 0)
		logger          = arktest.NewLogger()
	)

	controller := NewGCController(
		backupService,
		nil,
		snapshotService,
		nil,
		"bucket",
		1*time.Millisecond,
		sharedInformers.Ark().V1().Backups(),
		client.ArkV1(),
		sharedInformers.Ark().V1().Restores(),
		client.ArkV1(),
		logger,
		arktest.NewLogger(),
	).(*gcController)
	controller.clock = fakeClock

	backupService.On("GetAllBackups", "bucket").Return(scenario.backups, nil)
	sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(backup)

	// PASS 1
	controller.processBackups()
	controller.run()
	assert.Equal(t, 0, len(client.Actions()))
	backupService.AssertExpectations(t)
	assert.Equal(scenario.snapshots, snapshotService.SnapshotsTaken, "snapshots should not be garbage-collected yet.")

	// PASS 2
	expectedActions := []core.Action{
		core.NewDeleteAction(
			api.SchemeGroupVersion.WithResource("backups"),
			api.DefaultNamespace,
			"backup-1",
		),
	}

	fakeClock.Step(1 * time.Minute)
	backupService.On("DeleteBackupDir", "bucket", "backup-1").Return(nil)
	controller.run()
	controller.processBackups()

	assert.Equal(0, len(snapshotService.SnapshotsTaken), "snapshots should have been garbage-collected.")
	assert.Equal(t, expectedActions, client.Actions())
}
	backupService.AssertExpectations(t)

func TestHandleFinalizer(t *testing.T) {
	tests := []struct {
		name                 string
		backup               *api.Backup
		deleteBackupDirError bool
		expectGarbageCollect bool
		expectedPatch        []byte
	}{
		{
			name:   "nil deletionTimestamp exits early",
			backup: arktest.NewTestBackup().Backup,
		},
		{
			name:   "no finalizers exits early",
			backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).Backup,
		},
		{
			name:   "no gcFinalizer exits early",
			backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers("foo").Backup,
		},
		{
			name:                 "error when calling garbageCollect exits without patch",
			backup:               arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers(gcFinalizer).Backup,
			deleteBackupDirError: true,
		},
		{
			name:                 "normal case - patch includes the appropriate fields",
			backup:               arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers(gcFinalizer, "foo").WithResourceVersion("1").Backup,
			expectGarbageCollect: true,
			expectedPatch:        []byte(`{"metadata":{"finalizers":["foo"],"resourceVersion":"1"}}`),
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var (
				backupService   = &arktest.BackupService{}
				client          = fake.NewSimpleClientset()
				sharedInformers = informers.NewSharedInformerFactory(client, 0)
				controller      = NewGCController(
					backupService,
					nil,
					"bucket-1",
					1*time.Millisecond,
					sharedInformers.Ark().V1().Backups(),
					client.ArkV1(),
					sharedInformers.Ark().V1().Restores(),
					client.ArkV1(),
					arktest.NewLogger(),
				).(*gcController)
			)

			if test.expectGarbageCollect {
				backupService.On("DeleteBackupDir", controller.bucket, test.backup.Name).Return(nil)
			} else if test.deleteBackupDirError {
				backupService.On("DeleteBackupDir", controller.bucket, test.backup.Name).Return(errors.New("foo"))
			}

			// METHOD UNDER TEST
			controller.handleFinalizer(nil, test.backup)

			// VERIFY
			backupService.AssertExpectations(t)

			actions := client.Actions()

			if test.expectedPatch == nil {
				assert.Equal(t, 0, len(actions))
				return
			}

			require.Equal(t, 1, len(actions))
			patchAction, ok := actions[0].(core.PatchAction)
			require.True(t, ok, "action is not a PatchAction")

			assert.Equal(t, test.expectedPatch, patchAction.GetPatch())
		})
	}
}
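The "normal case" entry above expects the gc finalizer to be stripped while other finalizers and the resourceVersion are preserved in the resulting patch. A minimal, standalone sketch of how such a patch body could be produced; this is illustrative only, not the controller's actual implementation, and the finalizer name below is a placeholder:

```
package main

import (
	"encoding/json"
	"fmt"
)

// removeFinalizer returns a copy of finalizers with every occurrence of f removed.
func removeFinalizer(finalizers []string, f string) []string {
	out := make([]string, 0, len(finalizers))
	for _, item := range finalizers {
		if item != f {
			out = append(out, item)
		}
	}
	return out
}

func main() {
	// Mirrors the expectation above: finalizers ["placeholder-gc-finalizer", "foo"] and
	// resourceVersion "1" should result in a patch that keeps only "foo".
	remaining := removeFinalizer([]string{"placeholder-gc-finalizer", "foo"}, "placeholder-gc-finalizer")

	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      remaining,
			"resourceVersion": "1",
		},
	}
	b, _ := json.Marshal(patch)
	fmt.Println(string(b)) // {"metadata":{"finalizers":["foo"],"resourceVersion":"1"}}
}
```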
@@ -24,7 +24,9 @@ import (
	"k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/discovery"
	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/kubernetes/pkg/util/version"
)

// NamespaceAndName returns a string in the format <namespace>/<name>
@@ -48,3 +50,17 @@ func EnsureNamespaceExists(namespace *v1.Namespace, client corev1.NamespaceInter
		return false, errors.Wrapf(err, "error creating namespace %s", namespace.Name)
	}
}

func ServerVersion(client discovery.DiscoveryInterface) (*version.Version, error) {
	versionInfo, err := client.ServerVersion()
	if err != nil {
		return nil, errors.Wrap(err, "error getting server version")
	}

	semVer, err := version.ParseSemantic(versionInfo.String())
	if err != nil {
		return nil, errors.Wrap(err, "error parsing server version")
	}

	return semVer, err
}
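One plausible use of the new ServerVersion helper is gating behavior on a minimum cluster version via the vendored version package added later in this change. The sketch below inlines the same two steps, client.ServerVersion() followed by version.ParseSemantic, so it stands alone; the function name and the minimum value are illustrative, not part of this change:

```
package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/kubernetes/pkg/util/version"
)

// atLeastServerVersion reports whether the cluster's reported version is at least min.
// It mirrors the parsing done by the ServerVersion helper above.
func atLeastServerVersion(client discovery.DiscoveryInterface, min string) (bool, error) {
	info, err := client.ServerVersion()
	if err != nil {
		return false, err
	}
	serverVersion, err := version.ParseSemantic(info.String())
	if err != nil {
		return false, err
	}
	minVersion, err := version.ParseSemantic(min)
	if err != nil {
		return false, err
	}
	return serverVersion.AtLeast(minVersion), nil
}

func main() {
	// A live discovery client is needed to exercise atLeastServerVersion.
	fmt.Println("example only; see atLeastServerVersion above")
}
```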
@@ -114,3 +114,18 @@ func (b *TestBackup) WithSnapshotVolumesPointer(value *bool) *TestBackup {
	b.Spec.SnapshotVolumes = value
	return b
}

func (b *TestBackup) WithDeletionTimestamp(time time.Time) *TestBackup {
	b.DeletionTimestamp = &metav1.Time{Time: time}
	return b
}

func (b *TestBackup) WithFinalizers(finalizers ...string) *TestBackup {
	b.Finalizers = finalizers
	return b
}

func (b *TestBackup) WithResourceVersion(version string) *TestBackup {
	b.ResourceVersion = version
	return b
}
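For reference, these three builders only touch ObjectMeta fields. The equivalent object, written out by hand, looks like the following sketch; the finalizer names are placeholders rather than the controller's actual finalizer:

```
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Equivalent to chaining WithDeletionTimestamp, WithFinalizers, and
	// WithResourceVersion on a TestBackup.
	deletionTime := metav1.Time{Time: time.Now()}
	meta := metav1.ObjectMeta{
		DeletionTimestamp: &deletionTime,
		Finalizers:        []string{"example-finalizer", "foo"},
		ResourceVersion:   "1",
	}
	fmt.Println(meta.Finalizers, meta.ResourceVersion, meta.DeletionTimestamp.IsZero())
}
```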
@@ -0,0 +1,48 @@
#!/bin/bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# verify-util-pkg.sh checks whether *.go except doc.go in pkg/util have been moved into
# sub-pkgs, see issue #15634.

set -o errexit
set -o nounset
set -o pipefail

BASH_DIR=$(dirname "${BASH_SOURCE}")

find_go_files() {
  find . -maxdepth 1 -not \( \
      \( \
        -wholename './doc.go' \
      \) -prune \
    \) -name '*.go'
}

ret=0

pushd "${BASH_DIR}" > /dev/null
  for path in `find_go_files`; do
    file=$(basename $path)
    echo "Found pkg/util/${file}, but should be moved into util sub-pkgs." 1>&2
    ret=1
  done
popd > /dev/null

if [[ ${ret} > 0 ]]; then
  exit ${ret}
fi

echo "Util Package Verified."
@@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package version provides utilities for version number comparisons
package version // import "k8s.io/kubernetes/pkg/util/version"
@@ -0,0 +1,264 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package version

import (
	"bytes"
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// Version is an opqaue representation of a version number
type Version struct {
	components    []uint
	semver        bool
	preRelease    string
	buildMetadata string
}

var (
	// versionMatchRE splits a version string into numeric and "extra" parts
	versionMatchRE = regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`)
	// extraMatchRE splits the "extra" part of versionMatchRE into semver pre-release and build metadata; it does not validate the "no leading zeroes" constraint for pre-release
	extraMatchRE = regexp.MustCompile(`^(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?\s*$`)
)

func parse(str string, semver bool) (*Version, error) {
	parts := versionMatchRE.FindStringSubmatch(str)
	if parts == nil {
		return nil, fmt.Errorf("could not parse %q as version", str)
	}
	numbers, extra := parts[1], parts[2]

	components := strings.Split(numbers, ".")
	if (semver && len(components) != 3) || (!semver && len(components) < 2) {
		return nil, fmt.Errorf("illegal version string %q", str)
	}

	v := &Version{
		components: make([]uint, len(components)),
		semver:     semver,
	}
	for i, comp := range components {
		if (i == 0 || semver) && strings.HasPrefix(comp, "0") && comp != "0" {
			return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str)
		}
		num, err := strconv.ParseUint(comp, 10, 0)
		if err != nil {
			return nil, fmt.Errorf("illegal non-numeric version component %q in %q: %v", comp, str, err)
		}
		v.components[i] = uint(num)
	}

	if semver && extra != "" {
		extraParts := extraMatchRE.FindStringSubmatch(extra)
		if extraParts == nil {
			return nil, fmt.Errorf("could not parse pre-release/metadata (%s) in version %q", extra, str)
		}
		v.preRelease, v.buildMetadata = extraParts[1], extraParts[2]

		for _, comp := range strings.Split(v.preRelease, ".") {
			if _, err := strconv.ParseUint(comp, 10, 0); err == nil {
				if strings.HasPrefix(comp, "0") && comp != "0" {
					return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str)
				}
			}
		}
	}

	return v, nil
}

// ParseGeneric parses a "generic" version string. The version string must consist of two
// or more dot-separated numeric fields (the first of which can't have leading zeroes),
// followed by arbitrary uninterpreted data (which need not be separated from the final
// numeric field by punctuation). For convenience, leading and trailing whitespace is
// ignored, and the version can be preceded by the letter "v". See also ParseSemantic.
func ParseGeneric(str string) (*Version, error) {
	return parse(str, false)
}

// MustParseGeneric is like ParseGeneric except that it panics on error
func MustParseGeneric(str string) *Version {
	v, err := ParseGeneric(str)
	if err != nil {
		panic(err)
	}
	return v
}

// ParseSemantic parses a version string that exactly obeys the syntax and semantics of
// the "Semantic Versioning" specification (http://semver.org/) (although it ignores
// leading and trailing whitespace, and allows the version to be preceded by "v"). For
// version strings that are not guaranteed to obey the Semantic Versioning syntax, use
// ParseGeneric.
func ParseSemantic(str string) (*Version, error) {
	return parse(str, true)
}

// MustParseSemantic is like ParseSemantic except that it panics on error
func MustParseSemantic(str string) *Version {
	v, err := ParseSemantic(str)
	if err != nil {
		panic(err)
	}
	return v
}

// Major returns the major release number
func (v *Version) Major() uint {
	return v.components[0]
}

// Minor returns the minor release number
func (v *Version) Minor() uint {
	return v.components[1]
}

// Patch returns the patch release number if v is a Semantic Version, or 0
func (v *Version) Patch() uint {
	if len(v.components) < 3 {
		return 0
	}
	return v.components[2]
}

// BuildMetadata returns the build metadata, if v is a Semantic Version, or ""
func (v *Version) BuildMetadata() string {
	return v.buildMetadata
}

// PreRelease returns the prerelease metadata, if v is a Semantic Version, or ""
func (v *Version) PreRelease() string {
	return v.preRelease
}

// Components returns the version number components
func (v *Version) Components() []uint {
	return v.components
}

// String converts a Version back to a string; note that for versions parsed with
// ParseGeneric, this will not include the trailing uninterpreted portion of the version
// number.
func (v *Version) String() string {
	var buffer bytes.Buffer

	for i, comp := range v.components {
		if i > 0 {
			buffer.WriteString(".")
		}
		buffer.WriteString(fmt.Sprintf("%d", comp))
	}
	if v.preRelease != "" {
		buffer.WriteString("-")
		buffer.WriteString(v.preRelease)
	}
	if v.buildMetadata != "" {
		buffer.WriteString("+")
		buffer.WriteString(v.buildMetadata)
	}

	return buffer.String()
}

// compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0
// if they are equal
func (v *Version) compareInternal(other *Version) int {
	for i := range v.components {
		switch {
		case i >= len(other.components):
			if v.components[i] != 0 {
				return 1
			}
		case other.components[i] < v.components[i]:
			return 1
		case other.components[i] > v.components[i]:
			return -1
		}
	}

	if !v.semver || !other.semver {
		return 0
	}

	switch {
	case v.preRelease == "" && other.preRelease != "":
		return 1
	case v.preRelease != "" && other.preRelease == "":
		return -1
	case v.preRelease == other.preRelease: // includes case where both are ""
		return 0
	}

	vPR := strings.Split(v.preRelease, ".")
	oPR := strings.Split(other.preRelease, ".")
	for i := range vPR {
		if i >= len(oPR) {
			return 1
		}
		vNum, err := strconv.ParseUint(vPR[i], 10, 0)
		if err == nil {
			oNum, err := strconv.ParseUint(oPR[i], 10, 0)
			if err == nil {
				switch {
				case oNum < vNum:
					return 1
				case oNum > vNum:
					return -1
				default:
					continue
				}
			}
		}
		if oPR[i] < vPR[i] {
			return 1
		} else if oPR[i] > vPR[i] {
			return -1
		}
	}

	return 0
}

// AtLeast tests if a version is at least equal to a given minimum version. If both
// Versions are Semantic Versions, this will use the Semantic Version comparison
// algorithm. Otherwise, it will compare only the numeric components, with non-present
// components being considered "0" (ie, "1.4" is equal to "1.4.0").
func (v *Version) AtLeast(min *Version) bool {
	return v.compareInternal(min) != -1
}

// LessThan tests if a version is less than a given version. (It is exactly the opposite
// of AtLeast, for situations where asking "is v too old?" makes more sense than asking
// "is v new enough?".)
func (v *Version) LessThan(other *Version) bool {
	return v.compareInternal(other) == -1
}

// Compare compares v against a version string (which will be parsed as either Semantic
// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if
// it is greater than other, or 0 if they are equal.
func (v *Version) Compare(other string) (int, error) {
	ov, err := parse(other, v.semver)
	if err != nil {
		return 0, err
	}
	return v.compareInternal(ov), nil
}
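A short, self-contained example of how this vendored package behaves, using only the exported functions defined above; the version strings are arbitrary examples:

```
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/version"
)

func main() {
	// Semantic parsing requires exactly three numeric components, with optional
	// pre-release and build metadata.
	v := version.MustParseSemantic("v1.8.2-beta.1+abc123")
	fmt.Println(v.Major(), v.Minor(), v.Patch())   // 1 8 2
	fmt.Println(v.PreRelease(), v.BuildMetadata()) // beta.1 abc123

	// Generic parsing accepts two or more components and ignores trailing data.
	g := version.MustParseGeneric("1.8")
	fmt.Println(g.AtLeast(version.MustParseGeneric("1.7.5"))) // true

	// Compare parses its argument the same way as the receiver (semantic here).
	if cmp, err := v.Compare("1.9.0"); err == nil {
		fmt.Println(cmp) // -1, since 1.8.2-beta.1 is older than 1.9.0
	}
}
```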