Merge pull request #252 from skriss/delete-backup

add delete backup cmd using finalizer and simplify GC process
pull/243/head
Andy Goldstein 2018-01-02 14:26:03 -05:00 committed by GitHub
commit 6b0b6373da
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
26 changed files with 993 additions and 290 deletions

4
Gopkg.lock generated
View File

@ -419,13 +419,13 @@
[[projects]]
name = "k8s.io/kubernetes"
packages = ["pkg/printers"]
packages = ["pkg/printers","pkg/util/version"]
revision = "bdaeafa71f6c7c04636251031f93464384d54963"
version = "v1.8.2"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "c3cd1b703421685e5b2343ced6eaa6ec958b9c44d62277322f4c93de164c2d04"
inputs-digest = "cd582891fb7e89c2ea28ea41e52687e2902b6105c6c3bf989bd628ebb4b72208"
solver-name = "gps-cdcl"
solver-version = 1

View File

@ -30,6 +30,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.
### SEE ALSO
* [ark backup](ark_backup.md) - Work with backups
* [ark create](ark_create.md) - Create ark resources
* [ark delete](ark_delete.md) - Delete ark resources
* [ark describe](ark_describe.md) - Describe ark resources
* [ark get](ark_get.md) - Get ark resources
* [ark plugin](ark_plugin.md) - Work with plugins

View File

@ -29,6 +29,7 @@ Work with backups
### SEE ALSO
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
* [ark backup create](ark_backup_create.md) - Create a backup
* [ark backup delete](ark_backup_delete.md) - Delete a backup
* [ark backup describe](ark_backup_describe.md) - Describe backups
* [ark backup download](ark_backup_download.md) - Download a backup
* [ark backup get](ark_backup_get.md) - Get backups

View File

@ -0,0 +1,35 @@
## ark backup delete
Delete a backup
### Synopsis
Delete a backup
```
ark backup delete NAME [flags]
```
### Options
```
-h, --help help for delete
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark backup](ark_backup.md) - Work with backups

View File

@ -0,0 +1,34 @@
## ark delete
Delete ark resources
### Synopsis
Delete ark resources
### Options
```
-h, --help help for delete
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
* [ark delete backup](ark_delete_backup.md) - Delete a backup
* [ark delete restore](ark_delete_restore.md) - Delete a restore
* [ark delete schedule](ark_delete_schedule.md) - Delete a schedule

View File

@ -0,0 +1,35 @@
## ark delete backup
Delete a backup
### Synopsis
Delete a backup
```
ark delete backup NAME [flags]
```
### Options
```
-h, --help help for backup
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark delete](ark_delete.md) - Delete ark resources

View File

@ -0,0 +1,35 @@
## ark delete restore
Delete a restore
### Synopsis
Delete a restore
```
ark delete restore NAME [flags]
```
### Options
```
-h, --help help for restore
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark delete](ark_delete.md) - Delete ark resources

View File

@ -0,0 +1,35 @@
## ark delete schedule
Delete a schedule
### Synopsis
Delete a schedule
```
ark delete schedule NAME [flags]
```
### Options
```
-h, --help help for schedule
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark delete](ark_delete.md) - Delete ark resources

View File

@ -24,6 +24,7 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd/cli/backup"
"github.com/heptio/ark/pkg/cmd/cli/create"
"github.com/heptio/ark/pkg/cmd/cli/delete"
"github.com/heptio/ark/pkg/cmd/cli/describe"
"github.com/heptio/ark/pkg/cmd/cli/get"
"github.com/heptio/ark/pkg/cmd/cli/plugin"
@ -61,6 +62,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.`
create.NewCommand(f),
runplugin.NewCommand(),
plugin.NewCommand(f),
delete.NewCommand(f),
)
// add the glog flags

View File

@ -35,10 +35,7 @@ func NewCommand(f client.Factory) *cobra.Command {
NewLogsCommand(f),
NewDescribeCommand(f, "describe"),
NewDownloadCommand(f),
// If you delete a backup and it still exists in object storage, the backup sync controller will
// recreate it. Until we have a good UX around this, we're disabling the delete command.
// NewDeleteCommand(f),
NewDeleteCommand(f, "delete"),
)
return c

View File

@ -20,16 +20,19 @@ import (
"fmt"
"os"
"github.com/pkg/errors"
"github.com/spf13/cobra"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/controller"
kubeutil "github.com/heptio/ark/pkg/util/kube"
)
func NewDeleteCommand(f client.Factory) *cobra.Command {
func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
c := &cobra.Command{
Use: "delete NAME",
Use: fmt.Sprintf("%s NAME", use),
Short: "Delete a backup",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
@ -37,6 +40,16 @@ func NewDeleteCommand(f client.Factory) *cobra.Command {
os.Exit(1)
}
kubeClient, err := f.KubeClient()
cmd.CheckError(err)
serverVersion, err := kubeutil.ServerVersion(kubeClient.Discovery())
cmd.CheckError(err)
if !serverVersion.AtLeast(controller.MinVersionForDelete) {
cmd.CheckError(errors.Errorf("this command requires the Kubernetes server version to be at least %s", controller.MinVersionForDelete))
}
arkClient, err := f.Client()
cmd.CheckError(err)

View File

@ -0,0 +1,51 @@
/*
Copyright 2017 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package delete
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd/cli/backup"
"github.com/heptio/ark/pkg/cmd/cli/restore"
"github.com/heptio/ark/pkg/cmd/cli/schedule"
)
// NewCommand constructs the top-level "ark delete" command and wires in
// the backup, restore, and schedule delete subcommands (each with a
// pluralized alias).
func NewCommand(f client.Factory) *cobra.Command {
	deleteCmd := &cobra.Command{
		Use:   "delete",
		Short: "Delete ark resources",
		Long:  "Delete ark resources",
	}

	// withAliases attaches the given aliases to a subcommand and returns it,
	// so the subcommands can be registered in a single AddCommand call.
	withAliases := func(c *cobra.Command, aliases ...string) *cobra.Command {
		c.Aliases = aliases
		return c
	}

	deleteCmd.AddCommand(
		withAliases(backup.NewDeleteCommand(f, "backup"), "backups"),
		withAliases(restore.NewDeleteCommand(f, "restore"), "restores"),
		withAliases(schedule.NewDeleteCommand(f, "schedule"), "schedules"),
	)

	return deleteCmd
}

View File

@ -27,9 +27,9 @@ import (
"github.com/heptio/ark/pkg/cmd"
)
func NewDeleteCommand(f client.Factory) *cobra.Command {
func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
c := &cobra.Command{
Use: "delete NAME",
Use: fmt.Sprintf("%s NAME", use),
Short: "Delete a restore",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {

View File

@ -34,7 +34,7 @@ func NewCommand(f client.Factory) *cobra.Command {
NewGetCommand(f, "get"),
NewLogsCommand(f),
NewDescribeCommand(f, "describe"),
NewDeleteCommand(f),
NewDeleteCommand(f, "delete"),
)
return c

View File

@ -27,9 +27,9 @@ import (
"github.com/heptio/ark/pkg/cmd"
)
func NewDeleteCommand(f client.Factory) *cobra.Command {
func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
c := &cobra.Command{
Use: "delete NAME",
Use: fmt.Sprintf("%s NAME", use),
Short: "Delete a schedule",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {

View File

@ -33,7 +33,7 @@ func NewCommand(f client.Factory) *cobra.Command {
NewCreateCommand(f, "create"),
NewGetCommand(f, "get"),
NewDescribeCommand(f, "describe"),
NewDeleteCommand(f),
NewDeleteCommand(f, "delete"),
)
return c

View File

@ -55,6 +55,7 @@ import (
"github.com/heptio/ark/pkg/plugin"
"github.com/heptio/ark/pkg/restore"
"github.com/heptio/ark/pkg/util/kube"
kubeutil "github.com/heptio/ark/pkg/util/kube"
"github.com/heptio/ark/pkg/util/logging"
)
@ -470,22 +471,31 @@ func (s *server) runControllers(config *api.Config) error {
wg.Done()
}()
gcController := controller.NewGCController(
s.backupService,
s.snapshotService,
config.BackupStorageProvider.Bucket,
config.GCSyncPeriod.Duration,
s.sharedInformerFactory.Ark().V1().Backups(),
s.arkClient.ArkV1(),
s.sharedInformerFactory.Ark().V1().Restores(),
s.arkClient.ArkV1(),
s.logger,
)
wg.Add(1)
go func() {
gcController.Run(ctx, 1)
wg.Done()
}()
serverVersion, err := kubeutil.ServerVersion(s.kubeClient.Discovery())
if err != nil {
return err
}
if !serverVersion.AtLeast(controller.MinVersionForDelete) {
s.logger.Errorf("Garbage-collection is disabled because it requires the Kubernetes server version to be at least %s", controller.MinVersionForDelete)
} else {
gcController := controller.NewGCController(
s.backupService,
s.snapshotService,
config.BackupStorageProvider.Bucket,
config.GCSyncPeriod.Duration,
s.sharedInformerFactory.Ark().V1().Backups(),
s.arkClient.ArkV1(),
s.sharedInformerFactory.Ark().V1().Restores(),
s.arkClient.ArkV1(),
s.logger,
)
wg.Add(1)
go func() {
gcController.Run(ctx, 1)
wg.Done()
}()
}
}
restorer, err := newRestorer(

View File

@ -234,6 +234,11 @@ func (controller *backupController) processBackup(key string) error {
// set backup version
backup.Status.Version = backupVersion
// add GC finalizer if it's not there already
if !has(backup.Finalizers, gcFinalizer) {
backup.Finalizers = append(backup.Finalizers, gcFinalizer)
}
// calculate expiration
if backup.Spec.TTL.Duration > 0 {
backup.Status.Expiration = metav1.NewTime(controller.clock.Now().Add(backup.Spec.TTL.Duration))

View File

@ -190,6 +190,7 @@ func TestProcessBackup(t *testing.T) {
backup.Status.Phase = v1.BackupPhaseInProgress
backup.Status.Expiration.Time = expiration
backup.Status.Version = 1
backup.Finalizers = []string{gcFinalizer}
backupper.On("Backup", backup, mock.Anything, mock.Anything, mock.Anything).Return(nil)
cloudBackups.On("UploadBackup", "bucket", backup.Name, mock.Anything, mock.Anything, mock.Anything).Return(nil)
@ -225,6 +226,7 @@ func TestProcessBackup(t *testing.T) {
res.Status.Version = 1
res.Status.Expiration.Time = expiration
res.Status.Phase = v1.BackupPhase(phase)
res.Finalizers = []string{gcFinalizer}
return true, res, nil
})
@ -247,14 +249,15 @@ func TestProcessBackup(t *testing.T) {
actions := client.Actions()
require.Equal(t, 2, len(actions))
// validate Patch call 1 (setting version, expiration, and phase)
// validate Patch call 1 (setting finalizer, version, expiration, and phase)
patchAction, ok := actions[0].(core.PatchAction)
require.True(t, ok, "action is not a PatchAction")
patch := make(map[string]interface{})
require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")
assert.Equal(t, 1, len(patch), "patch has wrong number of keys")
// should have metadata and status
assert.Equal(t, 2, len(patch), "patch has wrong number of keys")
expectedStatusKeys := 2
if test.backup.Spec.TTL.Duration > 0 {
@ -268,10 +271,19 @@ func TestProcessBackup(t *testing.T) {
res, _ := collections.GetMap(patch, "status")
assert.Equal(t, expectedStatusKeys, len(res), "patch's status has the wrong number of keys")
finalizers, err := collections.GetSlice(patch, "metadata.finalizers")
require.NoError(t, err, "patch does not contain metadata.finalizers")
assert.Equal(t, 1, len(finalizers))
assert.Equal(t, gcFinalizer, finalizers[0])
res, _ = collections.GetMap(patch, "metadata")
assert.Equal(t, 1, len(res), "patch's metadata has the wrong number of keys")
// validate Patch call 2 (setting phase)
patchAction, ok = actions[1].(core.PatchAction)
require.True(t, ok, "action is not a PatchAction")
patch = make(map[string]interface{})
require.NoError(t, json.Unmarshal(patchAction.GetPatch(), &patch), "cannot unmarshal patch")
assert.Equal(t, 1, len(patch), "patch has wrong number of keys")

View File

@ -18,6 +18,7 @@ package controller
import (
"context"
"encoding/json"
"time"
"github.com/pkg/errors"
@ -25,9 +26,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/util/version"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
@ -37,6 +41,15 @@ import (
"github.com/heptio/ark/pkg/util/kube"
)
const gcFinalizer = "gc.ark.heptio.com"
// MinVersionForDelete is the minimum Kubernetes server version that Ark
// requires in order to be able to properly delete backups (including
// the associated snapshots and object storage files). This is because
// Ark uses finalizers on the backup CRD to implement garbage-collection
// and deletion.
var MinVersionForDelete = version.MustParseSemantic("1.7.5")
// gcController removes expired backup content from object storage.
type gcController struct {
backupService cloudprovider.BackupService
@ -70,7 +83,7 @@ func NewGCController(
syncPeriod = time.Minute
}
return &gcController{
c := &gcController{
backupService: backupService,
snapshotService: snapshotService,
bucket: bucket,
@ -84,9 +97,87 @@ func NewGCController(
restoreClient: restoreClient,
logger: logger,
}
backupInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
UpdateFunc: c.handleFinalizer,
},
)
return c
}
var _ Interface = &gcController{}
// handleFinalizer runs garbage-collection on a backup that has the Ark GC
// finalizer and a deletionTimestamp.
// handleFinalizer is the informer UpdateFunc for backups. When a backup has
// been marked for deletion (non-nil deletionTimestamp) and still carries the
// Ark GC finalizer, it deletes the backup's related cloud resources and then
// removes the finalizer so the API server can complete the deletion.
func (c *gcController) handleFinalizer(_, newObj interface{}) {
var (
backup = newObj.(*api.Backup)
log = c.logger.WithField("backup", kube.NamespaceAndName(backup))
)
// we're only interested in backups that have a deletionTimestamp and at
// least one finalizer.
if backup.DeletionTimestamp == nil || len(backup.Finalizers) == 0 {
return
}
log.Debugf("Backup has finalizers %s", backup.Finalizers)
// ignore backups whose finalizers don't include ours (some other
// controller owns whatever finalizers remain).
if !has(backup.Finalizers, gcFinalizer) {
return
}
log.Infof("Garbage-collecting backup")
if err := c.garbageCollect(backup, log); err != nil {
// if there were errors deleting related cloud resources, don't
// delete the backup API object because we don't want to orphan
// the cloud resources.
log.WithError(err).Error("Error deleting backup's related objects")
return
}
// Related resources are gone: strip the GC finalizer via a merge patch.
// NOTE(review): resourceVersion is included so the patch is rejected if
// the backup was modified concurrently — confirm this is the intent.
patchMap := map[string]interface{}{
"metadata": map[string]interface{}{
"finalizers": except(backup.Finalizers, gcFinalizer),
"resourceVersion": backup.ResourceVersion,
},
}
patchBytes, err := json.Marshal(patchMap)
if err != nil {
log.WithError(err).Error("Error marshaling finalizers patch")
return
}
if _, err = c.backupClient.Backups(backup.Namespace).Patch(backup.Name, types.MergePatchType, patchBytes); err != nil {
log.WithError(errors.WithStack(err)).Error("Error patching backup")
}
}
// has returns true if the `items` slice contains the
// value `val`, or false otherwise.
// has reports whether val occurs anywhere in items.
func has(items []string, val string) bool {
	for i := range items {
		if items[i] == val {
			return true
		}
	}
	return false
}
// except returns a new string slice that contains all of the entries
// from `items` except `val`.
// except returns a new slice containing every entry of items except val.
// A nil slice is returned when no entries remain.
func except(items []string, val string) []string {
	var kept []string
	for _, entry := range items {
		if entry == val {
			continue
		}
		kept = append(kept, entry)
	}
	return kept
}
// Run is a blocking function that runs a single worker to garbage-collect backups
// from object/block storage and the Ark API. It will return when it receives on the
@ -103,104 +194,74 @@ func (c *gcController) Run(ctx context.Context, workers int) error {
}
func (c *gcController) run() {
c.processBackups()
}
now := c.clock.Now()
c.logger.Info("Garbage-collecting expired backups")
// garbageCollectBackup removes an expired backup by deleting any associated backup files (if
// deleteBackupFiles = true), volume snapshots, restore API objects, and the backup API object
// itself.
func (c *gcController) garbageCollectBackup(backup *api.Backup, deleteBackupFiles bool) {
logContext := c.logger.WithField("backup", kube.NamespaceAndName(backup))
// if the backup includes snapshots but we don't currently have a PVProvider, we don't
// want to orphan the snapshots so skip garbage-collection entirely.
if c.snapshotService == nil && len(backup.Status.VolumeBackups) > 0 {
logContext.Warning("Cannot garbage-collect backup because backup includes snapshots and server is not configured with PersistentVolumeProvider")
// Go thru API objects and delete expired ones (finalizer will GC their
// corresponding files/snapshots/restores). Note that we're ignoring backups
// in object storage that haven't been synced to Kubernetes yet; they'll
// be processed for GC (if applicable) once they've been synced.
backups, err := c.backupLister.List(labels.Everything())
if err != nil {
c.logger.WithError(errors.WithStack(err)).Error("Error getting all backups")
return
}
// The GC process is primarily intended to delete expired cloud resources (file in object
// storage, snapshots). If we fail to delete any of these, we don't delete the Backup API
// object or metadata file in object storage so that we don't orphan the cloud resources.
deletionFailure := false
for _, backup := range backups {
log := c.logger.WithField("backup", kube.NamespaceAndName(backup))
if backup.Status.Expiration.Time.After(now) {
log.Debug("Backup has not expired yet, skipping")
continue
}
// since backups have a finalizer, this will actually have the effect of setting a deletionTimestamp and calling
// an update. The update will be handled by this controller and will result in a deletion of the obj storage
// files and the API object.
if err := c.backupClient.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
log.WithError(errors.WithStack(err)).Error("Error deleting backup")
}
}
}
// garbageCollect prepares for deleting an expired backup by deleting any
// associated backup files, volume snapshots, or restore API objects.
func (c *gcController) garbageCollect(backup *api.Backup, log logrus.FieldLogger) error {
// if the backup includes snapshots but we don't currently have a PVProvider, we don't
// want to orphan the snapshots so skip garbage-collection entirely.
if c.snapshotService == nil && len(backup.Status.VolumeBackups) > 0 {
return errors.New("cannot garbage-collect backup because it includes snapshots and Ark is not configured with a PersistentVolumeProvider")
}
var errs []error
for _, volumeBackup := range backup.Status.VolumeBackups {
logContext.WithField("snapshotID", volumeBackup.SnapshotID).Info("Removing snapshot associated with backup")
log.WithField("snapshotID", volumeBackup.SnapshotID).Info("Removing snapshot associated with backup")
if err := c.snapshotService.DeleteSnapshot(volumeBackup.SnapshotID); err != nil {
logContext.WithError(err).WithField("snapshotID", volumeBackup.SnapshotID).Error("Error deleting snapshot")
deletionFailure = true
errs = append(errs, errors.Wrapf(err, "error deleting snapshot %s", volumeBackup.SnapshotID))
}
}
// If applicable, delete everything in the backup dir in object storage *before* deleting the API object
// because otherwise the backup sync controller could re-sync the backup from object storage.
if deleteBackupFiles {
logContext.Info("Removing backup from object storage")
if err := c.backupService.DeleteBackupDir(c.bucket, backup.Name); err != nil {
logContext.WithError(err).Error("Error deleting backup")
deletionFailure = true
}
log.Info("Removing backup from object storage")
if err := c.backupService.DeleteBackupDir(c.bucket, backup.Name); err != nil {
errs = append(errs, errors.Wrap(err, "error deleting backup from object storage"))
}
logContext.Info("Getting restore API objects referencing backup")
if restores, err := c.restoreLister.Restores(backup.Namespace).List(labels.Everything()); err != nil {
logContext.WithError(errors.WithStack(err)).Error("Error getting Restore API objects")
log.WithError(errors.WithStack(err)).Error("Error listing restore API objects")
} else {
for _, restore := range restores {
if restore.Spec.BackupName == backup.Name {
logContext.WithField("restore", kube.NamespaceAndName(restore)).Info("Removing Restore API object referencing Backup")
if err := c.restoreClient.Restores(restore.Namespace).Delete(restore.Name, &metav1.DeleteOptions{}); err != nil {
logContext.WithError(errors.WithStack(err)).WithField("restore", kube.NamespaceAndName(restore)).
Error("Error deleting Restore API object")
}
if restore.Spec.BackupName != backup.Name {
continue
}
restoreLog := log.WithField("restore", kube.NamespaceAndName(restore))
restoreLog.Info("Deleting restore referencing backup")
if err := c.restoreClient.Restores(restore.Namespace).Delete(restore.Name, &metav1.DeleteOptions{}); err != nil {
restoreLog.WithError(errors.WithStack(err)).Error("Error deleting restore")
}
}
}
if deletionFailure {
logContext.Warning("Backup will not be deleted due to errors deleting related object storage files(s) and/or volume snapshots")
return
}
logContext.Info("Removing Backup API object")
if err := c.backupClient.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
logContext.WithError(errors.WithStack(err)).Error("Error deleting Backup API object")
}
}
// garbageCollectBackups checks backups for expiration and triggers garbage-collection for the expired
// ones.
func (c *gcController) garbageCollectBackups(backups []*api.Backup, expiration time.Time, deleteBackupFiles bool) {
for _, backup := range backups {
if backup.Status.Expiration.Time.After(expiration) {
c.logger.WithField("backup", kube.NamespaceAndName(backup)).Info("Backup has not expired yet, skipping")
continue
}
c.garbageCollectBackup(backup, deleteBackupFiles)
}
}
// processBackups gets backups from object storage and the API and submits
// them for garbage-collection.
func (c *gcController) processBackups() {
now := c.clock.Now()
c.logger.WithField("now", now).Info("Garbage-collecting backups that have expired as of now")
// GC backups in object storage. We do this in addition
// to GC'ing API objects to prevent orphan backup files.
backups, err := c.backupService.GetAllBackups(c.bucket)
if err != nil {
c.logger.WithError(err).Error("Error getting all backups from object storage")
return
}
c.garbageCollectBackups(backups, now, true)
// GC backups without files in object storage
apiBackups, err := c.backupLister.List(labels.Everything())
if err != nil {
c.logger.WithError(errors.WithStack(err)).Error("Error getting all Backup API objects")
return
}
c.garbageCollectBackups(apiBackups, now, false)
return kerrors.NewAggregate(errs)
}

View File

@ -17,38 +17,37 @@ limitations under the License.
package controller
import (
"errors"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
core "k8s.io/client-go/testing"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
arktest "github.com/heptio/ark/pkg/util/test"
)
type gcTest struct {
name string
backups []*api.Backup
snapshots sets.String
nilSnapshotService bool
expectedDeletions sets.String
expectedSnapshotsRemaining sets.String
}
func TestGarbageCollect(t *testing.T) {
func TestGCControllerRun(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now())
tests := []gcTest{
tests := []struct {
name string
backups []*api.Backup
snapshots sets.String
expectedDeletions sets.String
}{
{
name: "basic-expired",
name: "no backups results in no deletions",
},
{
name: "expired backup is deleted",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Second)).
@ -56,12 +55,10 @@ func TestGarbageCollect(t *testing.T) {
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
expectedDeletions: sets.NewString("backup-1"),
expectedSnapshotsRemaining: sets.NewString(),
expectedDeletions: sets.NewString("backup-1"),
},
{
name: "basic-unexpired",
name: "unexpired backup is not deleted",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(1*time.Minute)).
@ -69,12 +66,10 @@ func TestGarbageCollect(t *testing.T) {
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
expectedDeletions: sets.NewString(),
expectedSnapshotsRemaining: sets.NewString("snapshot-1", "snapshot-2"),
expectedDeletions: sets.NewString(),
},
{
name: "one expired, one unexpired",
name: "expired backup is deleted and unexpired backup is not deleted",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
@ -87,144 +82,91 @@ func TestGarbageCollect(t *testing.T) {
WithSnapshot("pv-4", "snapshot-4").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
expectedDeletions: sets.NewString("backup-1"),
expectedSnapshotsRemaining: sets.NewString("snapshot-3", "snapshot-4"),
},
{
name: "none expired in target bucket",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-2").
WithExpiration(fakeClock.Now().Add(1*time.Minute)).
WithSnapshot("pv-3", "snapshot-3").
WithSnapshot("pv-4", "snapshot-4").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
expectedDeletions: sets.NewString(),
expectedSnapshotsRemaining: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
},
{
name: "orphan snapshots",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
expectedDeletions: sets.NewString("backup-1"),
expectedSnapshotsRemaining: sets.NewString("snapshot-3", "snapshot-4"),
},
{
name: "no snapshot service only GC's backups without snapshots",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Second)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
arktest.NewTestBackup().WithName("backup-2").
WithExpiration(fakeClock.Now().Add(-1 * time.Second)).
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
nilSnapshotService: true,
expectedDeletions: sets.NewString("backup-2"),
expectedDeletions: sets.NewString("backup-1"),
},
}
for _, test := range tests {
var (
backupService = &arktest.BackupService{}
snapshotService *arktest.FakeSnapshotService
)
if !test.nilSnapshotService {
snapshotService = &arktest.FakeSnapshotService{SnapshotsTaken: test.snapshots}
}
t.Run(test.name, func(t *testing.T) {
var (
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
snapSvc cloudprovider.SnapshotService
bucket = "bucket"
logger = arktest.NewLogger()
)
if snapshotService != nil {
snapSvc = snapshotService
}
controller := NewGCController(
backupService,
snapSvc,
bucket,
nil,
nil,
"bucket",
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
logger,
arktest.NewLogger(),
).(*gcController)
controller.clock = fakeClock
backupService.On("GetAllBackups", bucket).Return(test.backups, nil)
for _, b := range test.expectedDeletions.List() {
backupService.On("DeleteBackupDir", bucket, b).Return(nil)
for _, backup := range test.backups {
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(backup)
}
controller.processBackups()
if !test.nilSnapshotService {
assert.Equal(t, test.expectedSnapshotsRemaining, snapshotService.SnapshotsTaken)
expectedDeletions := make([]core.Action, 0, len(test.expectedDeletions))
for backup := range test.expectedDeletions {
expectedDeletions = append(expectedDeletions, core.NewDeleteAction(
api.SchemeGroupVersion.WithResource("backups"),
api.DefaultNamespace,
backup,
))
}
backupService.AssertExpectations(t)
controller.run()
assert.Equal(t, expectedDeletions, client.Actions())
})
}
}
func TestGarbageCollectBackup(t *testing.T) {
tests := []struct {
name string
backup *api.Backup
deleteBackupFile bool
snapshots sets.String
backupFiles sets.String
backupMetadataFiles sets.String
restores []*api.Restore
expectedRestoreDeletes []string
expectedBackupDelete string
expectedSnapshots sets.String
expectedObjectStorageDeletions sets.String
name string
backup *api.Backup
snapshots sets.String
restores []*api.Restore
nilSnapshotService bool
expectErr bool
expectBackupDirDeleted bool
}{
{
name: "deleteBackupFile=false, snapshot deletion fails, don't delete kube backup",
name: "nil snapshot service when backup has snapshots returns error",
backup: arktest.NewTestBackup().WithName("backup-1").WithSnapshot("pv-1", "snap-1").Backup,
nilSnapshotService: true,
expectErr: true,
},
{
name: "nil snapshot service when backup doesn't have snapshots correctly garbage-collects",
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
nilSnapshotService: true,
expectBackupDirDeleted: true,
},
{
name: "return error if snapshot deletion fails",
backup: arktest.NewTestBackup().WithName("backup-1").
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
deleteBackupFile: false,
snapshots: sets.NewString("snapshot-1"),
expectedSnapshots: sets.NewString(),
expectedObjectStorageDeletions: sets.NewString(),
snapshots: sets.NewString("snapshot-1"),
expectBackupDirDeleted: true,
expectErr: true,
},
{
name: "related restores should be deleted",
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
deleteBackupFile: true,
snapshots: sets.NewString(),
name: "related restores should be deleted",
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
snapshots: sets.NewString(),
restores: []*api.Restore{
arktest.NewTestRestore(api.DefaultNamespace, "restore-1", api.RestorePhaseCompleted).WithBackup("backup-1").Restore,
arktest.NewTestRestore(api.DefaultNamespace, "restore-2", api.RestorePhaseCompleted).WithBackup("backup-2").Restore,
},
expectedRestoreDeletes: []string{"restore-1"},
expectedBackupDelete: "backup-1",
expectedSnapshots: sets.NewString(),
expectedObjectStorageDeletions: sets.NewString("backup-1"),
expectBackupDirDeleted: true,
},
}
@ -235,61 +177,67 @@ func TestGarbageCollectBackup(t *testing.T) {
snapshotService = &arktest.FakeSnapshotService{SnapshotsTaken: test.snapshots}
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
bucket = "bucket-1"
logger = arktest.NewLogger()
controller = NewGCController(
backupService,
snapshotService,
bucket,
"bucket-1",
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
logger,
arktest.NewLogger(),
).(*gcController)
)
if test.nilSnapshotService {
controller.snapshotService = nil
}
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup)
for _, restore := range test.restores {
sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore)
}
for _, b := range test.expectedObjectStorageDeletions.List() {
backupService.On("DeleteBackupDir", bucket, b).Return(nil)
if test.expectBackupDirDeleted {
backupService.On("DeleteBackupDir", controller.bucket, test.backup.Name).Return(nil)
}
// METHOD UNDER TEST
controller.garbageCollectBackup(test.backup, test.deleteBackupFile)
err := controller.garbageCollect(test.backup, controller.logger)
// VERIFY:
// remaining snapshots
assert.Equal(t, test.expectedSnapshots, snapshotService.SnapshotsTaken)
// error
assert.Equal(t, test.expectErr, err != nil)
// remaining snapshots
if !test.nilSnapshotService {
backupSnapshots := sets.NewString()
for _, snapshot := range test.backup.Status.VolumeBackups {
backupSnapshots.Insert(snapshot.SnapshotID)
}
assert.Equal(t, test.snapshots.Difference(backupSnapshots), snapshotService.SnapshotsTaken)
}
// restore client deletes
expectedActions := make([]core.Action, 0)
// Restore client deletes
for _, restore := range test.expectedRestoreDeletes {
for _, restore := range test.restores {
if restore.Spec.BackupName != test.backup.Name {
continue
}
action := core.NewDeleteAction(
api.SchemeGroupVersion.WithResource("restores"),
api.DefaultNamespace,
restore,
restore.Name,
)
expectedActions = append(expectedActions, action)
}
// Backup client deletes
if test.expectedBackupDelete != "" {
action := core.NewDeleteAction(
api.SchemeGroupVersion.WithResource("backups"),
api.DefaultNamespace,
test.expectedBackupDelete,
)
expectedActions = append(expectedActions, action)
}
assert.Equal(t, expectedActions, client.Actions())
// backup dir deletion
backupService.AssertExpectations(t)
})
}
@ -297,59 +245,126 @@ func TestGarbageCollectBackup(t *testing.T) {
func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
var (
backupService = &arktest.BackupService{}
snapshotService = &arktest.FakeSnapshotService{}
fakeClock = clock.NewFakeClock(time.Now())
assert = assert.New(t)
)
scenario := gcTest{
name: "basic-expired",
backups: []*api.Backup{
arktest.NewTestBackup().WithName("backup-1").
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
backup = arktest.NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(1*time.Second)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
}
snapshotService.SnapshotsTaken = scenario.snapshots
var (
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
logger = arktest.NewLogger()
Backup
)
controller := NewGCController(
backupService,
snapshotService,
nil,
nil,
"bucket",
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
logger,
arktest.NewLogger(),
).(*gcController)
controller.clock = fakeClock
backupService.On("GetAllBackups", "bucket").Return(scenario.backups, nil)
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(backup)
// PASS 1
controller.processBackups()
backupService.AssertExpectations(t)
assert.Equal(scenario.snapshots, snapshotService.SnapshotsTaken, "snapshots should not be garbage-collected yet.")
controller.run()
assert.Equal(t, 0, len(client.Actions()))
// PASS 2
expectedActions := []core.Action{
core.NewDeleteAction(
api.SchemeGroupVersion.WithResource("backups"),
api.DefaultNamespace,
"backup-1",
),
}
fakeClock.Step(1 * time.Minute)
backupService.On("DeleteBackupDir", "bucket", "backup-1").Return(nil)
controller.processBackups()
controller.run()
assert.Equal(0, len(snapshotService.SnapshotsTaken), "snapshots should have been garbage-collected.")
backupService.AssertExpectations(t)
assert.Equal(t, expectedActions, client.Actions())
}
// TestHandleFinalizer exercises gcController.handleFinalizer across its
// early-exit paths (nil deletionTimestamp, no finalizers, no gcFinalizer),
// the garbage-collection failure path (no patch issued), and the success
// path (gcFinalizer removed via a JSON merge patch).
func TestHandleFinalizer(t *testing.T) {
	testCases := []struct {
		name                 string
		backup               *api.Backup
		deleteBackupDirError bool
		expectGarbageCollect bool
		expectedPatch        []byte
	}{
		{
			name:   "nil deletionTimestamp exits early",
			backup: arktest.NewTestBackup().Backup,
		},
		{
			name:   "no finalizers exits early",
			backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).Backup,
		},
		{
			name:   "no gcFinalizer exits early",
			backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers("foo").Backup,
		},
		{
			name:                 "error when calling garbageCollect exits without patch",
			backup:               arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers(gcFinalizer).Backup,
			deleteBackupDirError: true,
		},
		{
			name:                 "normal case - patch includes the appropriate fields",
			backup:               arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers(gcFinalizer, "foo").WithResourceVersion("1").Backup,
			expectGarbageCollect: true,
			expectedPatch:        []byte(`{"metadata":{"finalizers":["foo"],"resourceVersion":"1"}}`),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			backupService := &arktest.BackupService{}
			client := fake.NewSimpleClientset()
			sharedInformers := informers.NewSharedInformerFactory(client, 0)

			controller := NewGCController(
				backupService,
				nil,
				"bucket-1",
				1*time.Millisecond,
				sharedInformers.Ark().V1().Backups(),
				client.ArkV1(),
				sharedInformers.Ark().V1().Restores(),
				client.ArkV1(),
				arktest.NewLogger(),
			).(*gcController)

			// Register the DeleteBackupDir expectation only for cases where
			// handleFinalizer is expected to reach object-storage deletion.
			switch {
			case tc.expectGarbageCollect:
				backupService.On("DeleteBackupDir", controller.bucket, tc.backup.Name).Return(nil)
			case tc.deleteBackupDirError:
				backupService.On("DeleteBackupDir", controller.bucket, tc.backup.Name).Return(errors.New("foo"))
			}

			// METHOD UNDER TEST
			controller.handleFinalizer(nil, tc.backup)

			// VERIFY
			backupService.AssertExpectations(t)

			actions := client.Actions()
			if tc.expectedPatch == nil {
				// Early-exit and error cases must not touch the API server.
				assert.Equal(t, 0, len(actions))
				return
			}

			require.Equal(t, 1, len(actions))
			patchAction, ok := actions[0].(core.PatchAction)
			require.True(t, ok, "action is not a PatchAction")
			assert.Equal(t, tc.expectedPatch, patchAction.GetPatch())
		})
	}
}

View File

@ -24,7 +24,9 @@ import (
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/pkg/util/version"
)
// NamespaceAndName returns a string in the format <namespace>/<name>
@ -48,3 +50,17 @@ func EnsureNamespaceExists(namespace *v1.Namespace, client corev1.NamespaceInter
return false, errors.Wrapf(err, "error creating namespace %s", namespace.Name)
}
}
// ServerVersion fetches the Kubernetes API server's version via the given
// discovery client and parses it as a semantic version. It returns an error
// if the server cannot be reached or the reported version string does not
// parse as strict semver.
func ServerVersion(client discovery.DiscoveryInterface) (*version.Version, error) {
	versionInfo, err := client.ServerVersion()
	if err != nil {
		return nil, errors.Wrap(err, "error getting server version")
	}

	semVer, err := version.ParseSemantic(versionInfo.String())
	if err != nil {
		return nil, errors.Wrap(err, "error parsing server version")
	}

	// Return a literal nil rather than the (known-nil) named err so the
	// success path stays correct even if statements are reordered later.
	return semVer, nil
}

View File

@ -114,3 +114,18 @@ func (b *TestBackup) WithSnapshotVolumesPointer(value *bool) *TestBackup {
b.Spec.SnapshotVolumes = value
return b
}
// WithDeletionTimestamp sets the backup's metadata.deletionTimestamp to the
// given instant and returns the builder for chaining.
// The parameter is named ts (not "time") to avoid shadowing the time package.
func (b *TestBackup) WithDeletionTimestamp(ts time.Time) *TestBackup {
	b.DeletionTimestamp = &metav1.Time{Time: ts}
	return b
}
// WithFinalizers sets the backup's metadata.finalizers to the given list and
// returns the builder for chaining. The variadic slice is stored directly
// (not copied), which is acceptable for test fixtures.
func (b *TestBackup) WithFinalizers(finalizers ...string) *TestBackup {
	b.Finalizers = finalizers
	return b
}
// WithResourceVersion sets the backup's metadata.resourceVersion and returns
// the builder for chaining.
func (b *TestBackup) WithResourceVersion(version string) *TestBackup {
	b.ResourceVersion = version
	return b
}

48
vendor/k8s.io/kubernetes/pkg/util/verify-util-pkg.sh generated vendored Executable file
View File

@ -0,0 +1,48 @@
#!/bin/bash

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# verify-util-pkg.sh checks whether *.go except doc.go in pkg/util have been moved into
# sub-pkgs, see issue #15634.

set -o errexit
set -o nounset
set -o pipefail

BASH_DIR=$(dirname "${BASH_SOURCE}")

# List top-level *.go files in the current directory (doc.go excluded);
# every hit is a violation.
find_go_files() {
  find . -maxdepth 1 -not \( \
      \( \
        -wholename './doc.go' \
      \) -prune \
    \) -name '*.go'
}

ret=0

pushd "${BASH_DIR}" > /dev/null
  for path in $(find_go_files); do
    file=$(basename "$path")
    echo "Found pkg/util/${file}, but should be moved into util sub-pkgs." 1>&2
    ret=1
  done
popd > /dev/null

# Use an arithmetic comparison: inside [[ ]], ">" compares strings
# lexicographically, not numerically.
if [[ ${ret} -gt 0 ]]; then
  exit ${ret}
fi

echo "Util Package Verified."

18
vendor/k8s.io/kubernetes/pkg/util/version/doc.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package version provides utilities for version number comparisons
package version // import "k8s.io/kubernetes/pkg/util/version"

264
vendor/k8s.io/kubernetes/pkg/util/version/version.go generated vendored Normal file
View File

@ -0,0 +1,264 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package version
import (
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
)
// Version is an opaque representation of a version number
type Version struct {
	components    []uint // numeric version fields (major, minor, ...)
	semver        bool   // true when parsed under strict Semantic Versioning rules
	preRelease    string // semver pre-release identifiers (text after "-"), if any
	buildMetadata string // semver build metadata (text after "+"), if any
}

var (
	// versionMatchRE splits a version string into numeric and "extra" parts
	versionMatchRE = regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`)
	// extraMatchRE splits the "extra" part of versionMatchRE into semver pre-release and build metadata; it does not validate the "no leading zeroes" constraint for pre-release
	extraMatchRE = regexp.MustCompile(`^(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?\s*$`)
)
// parse converts str into a Version. With semver=true it enforces strict
// Semantic Versioning syntax (exactly three numeric components plus optional
// pre-release/build-metadata suffixes); otherwise it accepts two or more
// numeric components followed by arbitrary trailing text.
func parse(str string, semver bool) (*Version, error) {
	parts := versionMatchRE.FindStringSubmatch(str)
	if parts == nil {
		return nil, fmt.Errorf("could not parse %q as version", str)
	}
	// parts[1] is the dotted numeric portion, parts[2] any trailing text.
	numbers, extra := parts[1], parts[2]
	components := strings.Split(numbers, ".")
	if (semver && len(components) != 3) || (!semver && len(components) < 2) {
		return nil, fmt.Errorf("illegal version string %q", str)
	}
	v := &Version{
		components: make([]uint, len(components)),
		semver:     semver,
	}
	for i, comp := range components {
		// Leading zeroes are rejected for the major component always, and
		// for every component under strict semver.
		if (i == 0 || semver) && strings.HasPrefix(comp, "0") && comp != "0" {
			return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str)
		}
		num, err := strconv.ParseUint(comp, 10, 0)
		if err != nil {
			return nil, fmt.Errorf("illegal non-numeric version component %q in %q: %v", comp, str, err)
		}
		v.components[i] = uint(num)
	}
	if semver && extra != "" {
		extraParts := extraMatchRE.FindStringSubmatch(extra)
		if extraParts == nil {
			return nil, fmt.Errorf("could not parse pre-release/metadata (%s) in version %q", extra, str)
		}
		v.preRelease, v.buildMetadata = extraParts[1], extraParts[2]
		// Numeric pre-release identifiers must not have leading zeroes
		// (non-numeric identifiers are exempt, hence the err == nil guard).
		for _, comp := range strings.Split(v.preRelease, ".") {
			if _, err := strconv.ParseUint(comp, 10, 0); err == nil {
				if strings.HasPrefix(comp, "0") && comp != "0" {
					return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str)
				}
			}
		}
	}
	return v, nil
}
// ParseGeneric parses a "generic" version string. The version string must consist of two
// or more dot-separated numeric fields (the first of which can't have leading zeroes),
// followed by arbitrary uninterpreted data (which need not be separated from the final
// numeric field by punctuation). For convenience, leading and trailing whitespace is
// ignored, and the version can be preceded by the letter "v". See also ParseSemantic.
func ParseGeneric(str string) (*Version, error) {
	// Lenient mode: two or more numeric components, trailing text allowed.
	return parse(str, false)
}
// MustParseGeneric is like ParseGeneric except that it panics on error
func MustParseGeneric(str string) *Version {
	parsed, parseErr := ParseGeneric(str)
	if parseErr != nil {
		panic(parseErr)
	}
	return parsed
}
// ParseSemantic parses a version string that exactly obeys the syntax and semantics of
// the "Semantic Versioning" specification (http://semver.org/) (although it ignores
// leading and trailing whitespace, and allows the version to be preceded by "v"). For
// version strings that are not guaranteed to obey the Semantic Versioning syntax, use
// ParseGeneric.
func ParseSemantic(str string) (*Version, error) {
	// Strict mode: exactly three numeric components, plus optional
	// pre-release and build-metadata suffixes.
	return parse(str, true)
}
// MustParseSemantic is like ParseSemantic except that it panics on error
func MustParseSemantic(str string) *Version {
	parsed, parseErr := ParseSemantic(str)
	if parseErr != nil {
		panic(parseErr)
	}
	return parsed
}
// Major returns the major release number
func (v *Version) Major() uint {
	// Safe: parse guarantees at least two components.
	return v.components[0]
}
// Minor returns the minor release number
func (v *Version) Minor() uint {
	// Safe: parse guarantees at least two components.
	return v.components[1]
}
// Patch returns the patch release number if v is a Semantic Version, or 0
func (v *Version) Patch() uint {
	// Semantic versions always carry three components; generic ones may not.
	if len(v.components) >= 3 {
		return v.components[2]
	}
	return 0
}
// BuildMetadata returns the build metadata, if v is a Semantic Version, or ""
// (parse only populates this field in strict semver mode).
func (v *Version) BuildMetadata() string {
	return v.buildMetadata
}
// PreRelease returns the prerelease metadata, if v is a Semantic Version, or ""
// (parse only populates this field in strict semver mode).
func (v *Version) PreRelease() string {
	return v.preRelease
}
// Components returns the version number components.
// Note: the internal slice is returned without copying, so mutating the
// result mutates v.
func (v *Version) Components() []uint {
	return v.components
}
// String converts a Version back to a string; note that for versions parsed with
// ParseGeneric, this will not include the trailing uninterpreted portion of the version
// number.
func (v *Version) String() string {
	// Render the numeric components as a dot-separated list, then append the
	// semver pre-release ("-") and build-metadata ("+") suffixes if present.
	fields := make([]string, len(v.components))
	for i, comp := range v.components {
		fields[i] = strconv.FormatUint(uint64(comp), 10)
	}
	out := strings.Join(fields, ".")
	if v.preRelease != "" {
		out += "-" + v.preRelease
	}
	if v.buildMetadata != "" {
		out += "+" + v.buildMetadata
	}
	return out
}
// compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0
// if they are equal
func (v *Version) compareInternal(other *Version) int {
	// Compare numeric components pairwise; a component missing from "other"
	// is treated as 0 (so "1.4" vs "1.4.0" compares equal).
	// NOTE(review): only v's components are iterated, so extra non-zero
	// components in "other" (e.g. v="1.4" vs other="1.4.5") are ignored and
	// the two compare equal — verify this asymmetry is intended by callers.
	for i := range v.components {
		switch {
		case i >= len(other.components):
			if v.components[i] != 0 {
				return 1
			}
		case other.components[i] < v.components[i]:
			return 1
		case other.components[i] > v.components[i]:
			return -1
		}
	}
	// Pre-release ordering only applies when both versions are semantic.
	if !v.semver || !other.semver {
		return 0
	}
	// A version without a pre-release ranks higher than one with it.
	switch {
	case v.preRelease == "" && other.preRelease != "":
		return 1
	case v.preRelease != "" && other.preRelease == "":
		return -1
	case v.preRelease == other.preRelease: // includes case where both are ""
		return 0
	}
	// Compare pre-release identifiers pairwise: when both parse as integers
	// they compare numerically, otherwise byte-wise string comparison is
	// used; a longer identifier list wins when all shared identifiers match.
	// NOTE(review): when one identifier is numeric and the other is not,
	// this falls through to string comparison, which may differ from strict
	// semver precedence (numeric identifiers rank below alphanumeric ones).
	vPR := strings.Split(v.preRelease, ".")
	oPR := strings.Split(other.preRelease, ".")
	for i := range vPR {
		if i >= len(oPR) {
			return 1
		}
		vNum, err := strconv.ParseUint(vPR[i], 10, 0)
		if err == nil {
			oNum, err := strconv.ParseUint(oPR[i], 10, 0)
			if err == nil {
				switch {
				case oNum < vNum:
					return 1
				case oNum > vNum:
					return -1
				default:
					continue
				}
			}
		}
		if oPR[i] < vPR[i] {
			return 1
		} else if oPR[i] > vPR[i] {
			return -1
		}
	}
	return 0
}
// AtLeast tests if a version is at least equal to a given minimum version. If both
// Versions are Semantic Versions, this will use the Semantic Version comparison
// algorithm. Otherwise, it will compare only the numeric components, with non-present
// components being considered "0" (ie, "1.4" is equal to "1.4.0").
func (v *Version) AtLeast(min *Version) bool {
	// "at least min" is exactly "not less than min".
	return !v.LessThan(min)
}
// LessThan tests if a version is less than a given version. (It is exactly the opposite
// of AtLeast, for situations where asking "is v too old?" makes more sense than asking
// "is v new enough?".)
func (v *Version) LessThan(other *Version) bool {
	// compareInternal only returns -1, 0, or 1, so "< 0" means "== -1".
	return v.compareInternal(other) < 0
}
// Compare compares v against a version string (which will be parsed as either Semantic
// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if
// it is greater than other, or 0 if they are equal.
func (v *Version) Compare(other string) (int, error) {
	// Parse "other" with the same strictness as v itself was parsed.
	parsed, err := parse(other, v.semver)
	if err != nil {
		return 0, err
	}
	return v.compareInternal(parsed), nil
}