Merge pull request #43 from skriss/pvprovider_optional

make PVProvider optional in server config; disallow snap/restore PVs …

commit b20feee7f9
@@ -23,7 +23,7 @@ ark backup create NAME
  -o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'.
  -l, --selector labelSelector only back up resources matching this label selector (default <none>)
      --show-labels show labels in the last column
      --snapshot-volumes take snapshots of PersistentVolumes as part of the backup (default true)
      --snapshot-volumes optionalBool[=true] take snapshots of PersistentVolumes as part of the backup
      --ttl duration how long before the backup can be garbage collected (default 24h0m0s)
```
@@ -19,7 +19,7 @@ ark restore create BACKUP
      --namespace-mappings mapStringString namespace mappings from name in the backup to desired restored name in the form src1:dst1,src2:dst2,...
      --namespaces stringArray comma-separated list of namespaces to restore
  -o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'.
      --restore-volumes whether to restore volumes from snapshots (default true)
      --restore-volumes optionalBool[=true] whether to restore volumes from snapshots
  -l, --selector labelSelector only restore resources matching this label selector (default <none>)
      --show-labels show labels in the last column
```
@@ -24,7 +24,7 @@ ark schedule create NAME
      --schedule string a cron expression specifying a recurring schedule for this backup to run
  -l, --selector labelSelector only back up resources matching this label selector (default <none>)
      --show-labels show labels in the last column
      --snapshot-volumes take snapshots of PersistentVolumes as part of the backup (default true)
      --snapshot-volumes optionalBool[=true] take snapshots of PersistentVolumes as part of the backup
      --ttl duration how long before the backup can be garbage collected (default 24h0m0s)
```
@@ -25,17 +25,12 @@ metadata:
  name: default
persistentVolumeProvider:
  aws:
    region: minio
    availabilityZone: minio
    s3ForcePathStyle: true
    s3Url: http://minio:9000
    region: us-west-2
    availabilityZone: us-west-2a
backupStorageProvider:
  bucket: ark
  aws:
    region: minio
    availabilityZone: minio
    s3ForcePathStyle: true
    s3Url: http://minio:9000
    region: us-west-2
backupSyncPeriod: 60m
gcSyncPeriod: 60m
scheduleSyncPeriod: 1m
@@ -50,7 +45,7 @@ The configurable parameters are as follows:

| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
| `persistentVolumeProvider` | CloudProviderConfig<br><br>(Supported key values are `aws`, `gcp`, and `azure`, but only one can be present. See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs.) | Required Field | The specification for whichever cloud provider the cluster is using for persistent volumes (to be snapshotted).<br><br> *NOTE*: For Azure, your Kubernetes cluster needs to be version 1.7.2+ in order to support PV snapshotting of its managed disks. |
| `persistentVolumeProvider` | CloudProviderConfig<br><br>(Supported key values are `aws`, `gcp`, and `azure`, but only one can be present. See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs.) | None (Optional) | The specification for whichever cloud provider the cluster is using for persistent volumes (to be snapshotted), if any.<br><br>If not specified, Backups and Restores requesting PV snapshots & restores, respectively, are considered invalid. <br><br> *NOTE*: For Azure, your Kubernetes cluster needs to be version 1.7.2+ in order to support PV snapshotting of its managed disks. |
| `backupStorageProvider`/(inline) | CloudProviderConfig<br><br>(Supported key values are `aws`, `gcp`, and `azure`, but only one can be present. See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs.) | Required Field | The specification for whichever cloud provider will be used to actually store the backups. |
| `backupStorageProvider/bucket` | String | Required Field | The storage bucket where backups are to be uploaded. |
| `backupSyncPeriod` | metav1.Duration | 60m0s | How frequently Ark queries the object storage to make sure that the appropriate Backup resources have been created for existing backup files. |
|
@ -63,22 +58,44 @@ The configurable parameters are as follows:
|
|||
|
||||
**(Or other S3-compatible storage)**
|
||||
|
||||
#### backupStorageProvider
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `region` | string | Required Field | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list. |
|
||||
| `disableSSL` | bool | `false` | Set this to `true` if you are using Minio (or another local, S3-compatible storage service) and your deployment is not secured. |
|
||||
| `s3ForcePathStyle` | bool | `false` | Set this to `true` if you are using a local storage service like Minio. |
|
||||
| `s3Url` | string | Required field for non-AWS-hosted storage| *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region`, `availabilityZone`, and `bucket`. This field is primarily for local storage services like Minio.|
|
||||
| `kmsKeyID` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f"<br><br>Specify an [AWS KMS key][12] id to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights.|
|
||||
|
||||
#### persistentVolumeProvider (AWS Only)
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `region` | string | Required Field | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list. |
|
||||
| `availabilityZone` | string | Required Field | *Example*: "us-east-1a"<br><br>See [AWS documentation][4] for details. |
|
||||
| `disableSSL` | bool | `false` | Set this to `true` if you are using Minio (or another local, S3-compatible storage service) and your deployment is not secured. |
|
||||
| `s3ForcePathStyle` | bool | `false` | Set this to `true` if you are using a local storage service like Minio. |
|
||||
| `s3Url` | string | Required field for non-AWS-hosted storage| *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region`, `availabilityZone`, and `bucket`. This field is primarily for local sotrage services like Minio.|
|
||||
| `kmsKeyID` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f"<br><br>Specify an [AWS KMS key][12] id to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights.|
|
||||
|
||||
### GCP
|
||||
|
||||
#### backupStorageProvider
|
||||
|
||||
No parameters required; specify an empty object per [example file][13].
|
||||
|
||||
#### persistentVolumeProvider
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `project` | string | Required Field | *Example*: "project-example-3jsn23"<br><br> See the [Project ID documentation][5] for details. |
|
||||
| `zone` | string | Required Field | *Example*: "us-central1-a"<br><br>See [GCP documentation][6] for the full list. |
|
||||
|
||||
### Azure
|
||||
|
||||
#### backupStorageProvider
|
||||
|
||||
No parameters required; specify an empty object per [example file][14].
|
||||
|
||||
#### persistentVolumeProvider
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `location` | string | Required Field | *Example*: "Canada East"<br><br>See [the list of available locations][7] (note that this particular page refers to them as "Regions"). |
|
||||
|
@@ -97,4 +114,6 @@ The configurable parameters are as follows:
[10]: #overview
[11]: #example
[12]: http://docs.aws.amazon.com/kms/latest/developerguide/overview.html
[13]: ../examples/gcp/00-ark-config.yaml
[14]: ../examples/azure/10-ark-config.yaml
@@ -26,7 +26,6 @@ backupStorageProvider:
  bucket: <YOUR_BUCKET>
  aws:
    region: <YOUR_REGION>
    availabilityZone: <YOUR_AVAILABILITY_ZONE>
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m
@@ -24,9 +24,7 @@ persistentVolumeProvider:
    apiTimeout: <YOUR_TIMEOUT>
backupStorageProvider:
  bucket: <YOUR_BUCKET>
  azure:
    location: <YOUR_LOCATION>
    apiTimeout: <YOUR_TIMEOUT>
  azure: {}
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m
@@ -24,9 +24,7 @@ persistentVolumeProvider:
    zone: <YOUR_ZONE>
backupStorageProvider:
  bucket: <YOUR_BUCKET>
  gcp:
    project: <YOUR_PROJECT>
    zone: <YOUR_ZONE>
  gcp: {}
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m
@@ -18,17 +18,10 @@ kind: Config
metadata:
  namespace: heptio-ark
  name: default
persistentVolumeProvider:
  aws:
    region: minio
    availabilityZone: minio
    s3ForcePathStyle: true
    s3Url: http://minio:9000
backupStorageProvider:
  bucket: ark
  aws:
    region: minio
    availabilityZone: minio
    s3ForcePathStyle: true
    s3Url: http://minio:9000
backupSyncPeriod: 1m
@@ -41,10 +41,10 @@ type BackupSpec struct {
	// or nil, all objects are included. Optional.
	LabelSelector *metav1.LabelSelector `json:"labelSelector"`

	// SnapshotVolumes is a bool which specifies whether to take
	// cloud snapshots of any PV's referenced in the set of objects
	// included in the Backup.
	SnapshotVolumes bool `json:"snapshotVolumes"`
	// SnapshotVolumes specifies whether to take cloud snapshots
	// of any PV's referenced in the set of objects included
	// in the Backup.
	SnapshotVolumes *bool `json:"snapshotVolumes"`

	// TTL is a time.Duration-parseable string describing how long
	// the Backup should be retained for.
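The change from `bool` to `*bool` gives `snapshotVolumes` three states: unset (nil), explicitly true, and explicitly false. A minimal sketch of how a consumer might resolve that, with a helper name of our own invention (`resolveSnapshotVolumes` is not part of the Ark API):

```go
package main

import "fmt"

// resolveSnapshotVolumes is a hypothetical helper illustrating the tri-state
// *bool: nil means "not specified" (fall back to a default), while a non-nil
// pointer carries the user's explicit true/false choice.
func resolveSnapshotVolumes(snapshotVolumes *bool, defaultValue bool) bool {
	if snapshotVolumes == nil {
		return defaultValue
	}
	return *snapshotVolumes
}

func main() {
	explicitFalse := false
	fmt.Println(resolveSnapshotVolumes(nil, true))            // true: unset, use the default
	fmt.Println(resolveSnapshotVolumes(&explicitFalse, true)) // false: explicitly disabled
}
```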
@@ -35,8 +35,8 @@ type Config struct {
	metav1.ObjectMeta `json:"metadata"`

	// PersistentVolumeProvider is the configuration information for the cloud where
	// the cluster is running and has PersistentVolumes to snapshot or restore.
	PersistentVolumeProvider CloudProviderConfig `json:"persistentVolumeProvider"`
	// the cluster is running and has PersistentVolumes to snapshot or restore. Optional.
	PersistentVolumeProvider *CloudProviderConfig `json:"persistentVolumeProvider"`

	// BackupStorageProvider is the configuration information for the cloud where
	// Ark backups are stored in object storage. This may be a different cloud than
@@ -38,9 +38,9 @@ type RestoreSpec struct {
	// or nil, all objects are included. Optional.
	LabelSelector *metav1.LabelSelector `json:"labelSelector"`

	// RestorePVs is a bool defining whether to restore all included
	// PVs from snapshot (via the cloudprovider). Default false.
	RestorePVs bool `json:"restorePVs"`
	// RestorePVs specifies whether to restore all included
	// PVs from snapshot (via the cloudprovider).
	RestorePVs *bool `json:"restorePVs"`
}

// RestorePhase is a string representation of the lifecycle phase
@@ -364,7 +364,10 @@ func (*realItemBackupper) backupItem(ctx *backupContext, item map[string]interfa

	if action != nil {
		glog.V(4).Infof("Executing action on %s, ns=%s, name=%s", groupResource, namespace, name)
		action.Execute(item, ctx.backup)

		if err := action.Execute(item, ctx.backup); err != nil {
			return err
		}
	}

	glog.V(2).Infof("Backing up resource=%s, ns=%s, name=%s", groupResource, namespace, name)
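Because the return value of Execute is now propagated, any Action implementation can abort the backup of an item by returning an error. A hypothetical sketch against the Action shape used above (`auditAction` is illustrative and not part of Ark):

```go
package backup

import (
	"fmt"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
)

// auditAction is a hypothetical Action: if it returns an error, backupItem
// now returns that error instead of silently continuing.
type auditAction struct{}

func (a *auditAction) Execute(item map[string]interface{}, backup *api.Backup) error {
	if _, ok := item["metadata"]; !ok {
		return fmt.Errorf("item in backup %q has no metadata", backup.Name)
	}
	return nil
}
```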
@@ -17,8 +17,8 @@ limitations under the License.
package backup

import (
	"errors"
	"fmt"
	"regexp"

	"github.com/golang/glog"

@@ -26,7 +26,7 @@ import (

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/cloudprovider"
	"github.com/heptio/ark/pkg/util/collections"
	kubeutil "github.com/heptio/ark/pkg/util/kube"
)

// volumeSnapshotAction is a struct that knows how to take snapshots of PersistentVolumes
@@ -38,11 +38,15 @@ type volumeSnapshotAction struct {

var _ Action = &volumeSnapshotAction{}

func NewVolumeSnapshotAction(snapshotService cloudprovider.SnapshotService) Action {
func NewVolumeSnapshotAction(snapshotService cloudprovider.SnapshotService) (Action, error) {
	if snapshotService == nil {
		return nil, errors.New("snapshotService cannot be nil")
	}

	return &volumeSnapshotAction{
		snapshotService: snapshotService,
		clock: clock.RealClock{},
	}
	}, nil
}

// Execute triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
@@ -50,7 +54,7 @@ func NewVolumeSnapshotAction(snapshotService cloudprovider.SnapshotService) Acti
// disk type and IOPS (if applicable) to be able to restore to current state later.
func (a *volumeSnapshotAction) Execute(volume map[string]interface{}, backup *api.Backup) error {
	backupName := fmt.Sprintf("%s/%s", backup.Namespace, backup.Name)
	if !backup.Spec.SnapshotVolumes {
	if backup.Spec.SnapshotVolumes != nil && !*backup.Spec.SnapshotVolumes {
		glog.V(2).Infof("Backup %q has volume snapshots disabled; skipping volume snapshot action.", backupName)
		return nil
	}
@@ -58,14 +62,20 @@ func (a *volumeSnapshotAction) Execute(volume map[string]interface{}, backup *ap
	metadata := volume["metadata"].(map[string]interface{})
	name := metadata["name"].(string)

	volumeID := getVolumeID(volume)
	volumeID, err := kubeutil.GetVolumeID(volume)
	// non-nil error means it's a supported PV source but volume ID can't be found
	if err != nil {
		return fmt.Errorf("error getting volume ID for backup %q, PersistentVolume %q: %v", backupName, name, err)
	}
	// no volumeID / nil error means unsupported PV source
	if volumeID == "" {
		return fmt.Errorf("unable to determine volume ID for backup %q, PersistentVolume %q", backupName, name)
		glog.V(2).Infof("Backup %q: PersistentVolume %q is not a supported volume type for snapshots, skipping.", backupName, name)
		return nil
	}

	expiration := a.clock.Now().Add(backup.Spec.TTL.Duration)

	glog.Infof("Backup %q: snapshotting PersistenVolume %q, volume-id %q, expiration %v", backupName, name, volumeID, expiration)
	glog.Infof("Backup %q: snapshotting PersistentVolume %q, volume-id %q, expiration %v", backupName, name, volumeID, expiration)

	snapshotID, err := a.snapshotService.CreateSnapshot(volumeID)
	if err != nil {
@@ -91,38 +101,3 @@ func (a *volumeSnapshotAction) Execute(volume map[string]interface{}, backup *ap

	return nil
}

var ebsVolumeIDRegex = regexp.MustCompile("vol-.*")

func getVolumeID(pv map[string]interface{}) string {
	spec, err := collections.GetMap(pv, "spec")
	if err != nil {
		return ""
	}

	if aws, err := collections.GetMap(spec, "awsElasticBlockStore"); err == nil {
		volumeID, err := collections.GetString(aws, "volumeID")
		if err != nil {
			return ""
		}
		return ebsVolumeIDRegex.FindString(volumeID)
	}

	if gce, err := collections.GetMap(spec, "gcePersistentDisk"); err == nil {
		volumeID, err := collections.GetString(gce, "pdName")
		if err != nil {
			return ""
		}
		return volumeID
	}

	if gce, err := collections.GetMap(spec, "azureDisk"); err == nil {
		volumeID, err := collections.GetString(gce, "diskName")
		if err != nil {
			return ""
		}
		return volumeID
	}

	return ""
}
@@ -40,6 +40,7 @@ func TestVolumeSnapshotAction(t *testing.T) {
		ttl time.Duration
		expectError bool
		expectedVolumeID string
		expectedSnapshotsTaken int
		existingVolumeBackups map[string]*v1.VolumeBackupInfo
		volumeInfo map[string]v1.VolumeBackupInfo
	}{
@@ -55,10 +56,10 @@ func TestVolumeSnapshotAction(t *testing.T) {
			expectError: true,
		},
		{
			name: "can't find volume id - spec but no volume source defined",
			name: "unsupported PV source type",
			snapshotEnabled: true,
			pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {}}`,
			expectError: true,
			pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"unsupportedPVSource": {}}}`,
			expectError: false,
		},
		{
			name: "can't find volume id - aws but no volume id",
@@ -77,6 +78,7 @@ func TestVolumeSnapshotAction(t *testing.T) {
			snapshotEnabled: true,
			pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"awsElasticBlockStore": {"volumeID": "vol-abc123"}}}`,
			expectError: false,
			expectedSnapshotsTaken: 1,
			expectedVolumeID: "vol-abc123",
			ttl: 5 * time.Minute,
			volumeInfo: map[string]v1.VolumeBackupInfo{
@@ -88,6 +90,7 @@ func TestVolumeSnapshotAction(t *testing.T) {
			snapshotEnabled: true,
			pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"awsElasticBlockStore": {"volumeID": "vol-abc123"}}}`,
			expectError: false,
			expectedSnapshotsTaken: 1,
			expectedVolumeID: "vol-abc123",
			ttl: 5 * time.Minute,
			volumeInfo: map[string]v1.VolumeBackupInfo{
@@ -99,6 +102,7 @@ func TestVolumeSnapshotAction(t *testing.T) {
			snapshotEnabled: true,
			pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-west-2a/vol-abc123"}}}`,
			expectError: false,
			expectedSnapshotsTaken: 1,
			expectedVolumeID: "vol-abc123",
			ttl: 5 * time.Minute,
			volumeInfo: map[string]v1.VolumeBackupInfo{
@@ -110,17 +114,31 @@ func TestVolumeSnapshotAction(t *testing.T) {
			snapshotEnabled: true,
			pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`,
			expectError: false,
			expectedSnapshotsTaken: 1,
			expectedVolumeID: "pd-abc123",
			ttl: 5 * time.Minute,
			volumeInfo: map[string]v1.VolumeBackupInfo{
				"pd-abc123": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1"},
			},
		},
		{
			name: "azure",
			snapshotEnabled: true,
			pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"azureDisk": {"diskName": "foo-disk"}}}`,
			expectError: false,
			expectedSnapshotsTaken: 1,
			expectedVolumeID: "foo-disk",
			ttl: 5 * time.Minute,
			volumeInfo: map[string]v1.VolumeBackupInfo{
				"foo-disk": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1"},
			},
		},
		{
			name: "preexisting volume backup info in backup status",
			snapshotEnabled: true,
			pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`,
			expectError: false,
			expectedSnapshotsTaken: 1,
			expectedVolumeID: "pd-abc123",
			ttl: 5 * time.Minute,
			existingVolumeBackups: map[string]*v1.VolumeBackupInfo{
@@ -146,7 +164,7 @@ func TestVolumeSnapshotAction(t *testing.T) {
				Name: "mybackup",
			},
			Spec: v1.BackupSpec{
				SnapshotVolumes: test.snapshotEnabled,
				SnapshotVolumes: &test.snapshotEnabled,
				TTL: metav1.Duration{Duration: test.ttl},
			},
			Status: v1.BackupStatus{
@@ -155,7 +173,10 @@ func TestVolumeSnapshotAction(t *testing.T) {
		}

		snapshotService := &FakeSnapshotService{SnapshottableVolumes: test.volumeInfo}
		action := NewVolumeSnapshotAction(snapshotService).(*volumeSnapshotAction)

		vsa, _ := NewVolumeSnapshotAction(snapshotService)
		action := vsa.(*volumeSnapshotAction)

		fakeClock := clock.NewFakeClock(time.Now())
		action.clock = fakeClock

@@ -185,8 +206,9 @@ func TestVolumeSnapshotAction(t *testing.T) {
		}

		// we should have one snapshot taken exactly
		require.Equal(t, 1, snapshotService.SnapshotsTaken.Len())
		require.Equal(t, test.expectedSnapshotsTaken, snapshotService.SnapshotsTaken.Len())

		if test.expectedSnapshotsTaken > 0 {
			// the snapshotID should be the one in the entry in snapshotService.SnapshottableVolumes
			// for the volume we ran the test for
			snapshotID, _ := snapshotService.SnapshotsTaken.PopAny()
@@ -200,6 +222,7 @@ func TestVolumeSnapshotAction(t *testing.T) {
			if e, a := expectedVolumeBackups, backup.Status.VolumeBackups; !reflect.DeepEqual(e, a) {
				t.Errorf("backup.status.VolumeBackups: expected %v, got %v", e, a)
			}
		}
		})
	}
}
@@ -17,8 +17,11 @@ limitations under the License.
package aws

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"

	"k8s.io/apimachinery/pkg/util/sets"
@@ -33,6 +36,53 @@ type blockStorageAdapter struct {
	az string
}

func getSession(config *aws.Config) (*session.Session, error) {
	sess, err := session.NewSession(config)
	if err != nil {
		return nil, err
	}

	if _, err := sess.Config.Credentials.Get(); err != nil {
		return nil, err
	}

	return sess, nil
}

func NewBlockStorageAdapter(region, availabilityZone string) (cloudprovider.BlockStorageAdapter, error) {
	if region == "" {
		return nil, errors.New("missing region in aws configuration in config file")
	}
	if availabilityZone == "" {
		return nil, errors.New("missing availabilityZone in aws configuration in config file")
	}

	awsConfig := aws.NewConfig().WithRegion(region)

	sess, err := getSession(awsConfig)
	if err != nil {
		return nil, err
	}

	// validate the availabilityZone
	var (
		ec2Client = ec2.New(sess)
		azReq = &ec2.DescribeAvailabilityZonesInput{ZoneNames: []*string{&availabilityZone}}
	)
	res, err := ec2Client.DescribeAvailabilityZones(azReq)
	if err != nil {
		return nil, err
	}
	if len(res.AvailabilityZones) == 0 {
		return nil, fmt.Errorf("availability zone %q not found", availabilityZone)
	}

	return &blockStorageAdapter{
		ec2: ec2Client,
		az: availabilityZone,
	}, nil
}

// iopsVolumeTypes is a set of AWS EBS volume types for which IOPS should
// be captured during snapshot and provided when creating a new volume
// from snapshot.
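A brief usage sketch; the adapter package import path is assumed (it is not shown in this diff), and the region/zone values are placeholders:

```go
package main

import (
	"log"

	"github.com/heptio/ark/pkg/cloudprovider"
	// Assumed import path for the AWS adapter package shown above.
	arkaws "github.com/heptio/ark/pkg/cloudprovider/aws"
)

func main() {
	// Region and availability zone come from the aws section of the Ark Config;
	// the constructor validates both before returning an adapter.
	blockStorage, err := arkaws.NewBlockStorageAdapter("us-west-2", "us-west-2a")
	if err != nil {
		log.Fatalf("unable to configure block storage: %v", err)
	}

	snapshotService := cloudprovider.NewSnapshotService(blockStorage)
	_ = snapshotService
}
```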
@@ -17,9 +17,11 @@ limitations under the License.
package aws

import (
	"errors"
	"io"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/service/s3"

	"github.com/heptio/ark/pkg/cloudprovider"
@@ -32,6 +34,40 @@ type objectStorageAdapter struct {
	kmsKeyID string
}

func NewObjectStorageAdapter(region, s3URL, kmsKeyID string, s3ForcePathStyle bool) (cloudprovider.ObjectStorageAdapter, error) {
	if region == "" {
		return nil, errors.New("missing region in aws configuration in config file")
	}

	awsConfig := aws.NewConfig().
		WithRegion(region).
		WithS3ForcePathStyle(s3ForcePathStyle)

	if s3URL != "" {
		awsConfig = awsConfig.WithEndpointResolver(
			endpoints.ResolverFunc(func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
				if service == endpoints.S3ServiceID {
					return endpoints.ResolvedEndpoint{
						URL: s3URL,
					}, nil
				}

				return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
			}),
		)
	}

	sess, err := getSession(awsConfig)
	if err != nil {
		return nil, err
	}

	return &objectStorageAdapter{
		s3: s3.New(sess),
		kmsKeyID: kmsKeyID,
	}, nil
}

func (op *objectStorageAdapter) PutObject(bucket string, key string, body io.ReadSeeker) error {
	req := &s3.PutObjectInput{
		Bucket: &bucket,
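A usage sketch for the Minio case; the import path is assumed (not shown in this diff), and the endpoint and bucket names are the ones used in the example config above:

```go
package main

import (
	"log"
	"strings"

	// Assumed import path for the AWS adapter package shown above.
	arkaws "github.com/heptio/ark/pkg/cloudprovider/aws"
)

func main() {
	// A non-empty s3Url routes S3 requests to the local endpoint via the
	// custom endpoint resolver; path-style addressing is required for Minio.
	objectStorage, err := arkaws.NewObjectStorageAdapter("minio", "http://minio:9000", "", true)
	if err != nil {
		log.Fatalf("unable to configure object storage: %v", err)
	}

	if err := objectStorage.PutObject("ark", "backups/test/ark-backup.tar.gz", strings.NewReader("placeholder")); err != nil {
		log.Fatalf("upload failed: %v", err)
	}
}
```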
@@ -1,78 +0,0 @@
/*
Copyright 2017 Heptio Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/s3"

	"github.com/heptio/ark/pkg/cloudprovider"
)

type storageAdapter struct {
	blockStorage *blockStorageAdapter
	objectStorage *objectStorageAdapter
}

var _ cloudprovider.StorageAdapter = &storageAdapter{}

func NewStorageAdapter(config *aws.Config, availabilityZone string, kmsKeyID string) (cloudprovider.StorageAdapter, error) {
	sess, err := session.NewSession(config)
	if err != nil {
		return nil, err
	}

	if _, err := sess.Config.Credentials.Get(); err != nil {
		return nil, err
	}

	// validate the availabilityZone
	var (
		ec2Client = ec2.New(sess)
		azReq = &ec2.DescribeAvailabilityZonesInput{ZoneNames: []*string{&availabilityZone}}
	)
	res, err := ec2Client.DescribeAvailabilityZones(azReq)
	if err != nil {
		return nil, err
	}
	if len(res.AvailabilityZones) == 0 {
		return nil, fmt.Errorf("availability zone %q not found", availabilityZone)
	}

	return &storageAdapter{
		blockStorage: &blockStorageAdapter{
			ec2: ec2Client,
			az: availabilityZone,
		},
		objectStorage: &objectStorageAdapter{
			s3: s3.New(sess),
			kmsKeyID: kmsKeyID,
		},
	}, nil
}

func (op *storageAdapter) ObjectStorage() cloudprovider.ObjectStorageAdapter {
	return op.objectStorage
}

func (op *storageAdapter) BlockStorage() cloudprovider.BlockStorageAdapter {
	return op.blockStorage
}
@@ -20,17 +20,21 @@ import (
	"context"
	"errors"
	"fmt"
	"os"
	"time"

	azure "github.com/Azure/azure-sdk-for-go/arm/disk"
	"github.com/Azure/azure-sdk-for-go/arm/disk"
	"github.com/Azure/azure-sdk-for-go/arm/examples/helpers"
	"github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/satori/uuid"

	"github.com/heptio/ark/pkg/cloudprovider"
)

type blockStorageAdapter struct {
	disks *azure.DisksClient
	snaps *azure.SnapshotsClient
	disks *disk.DisksClient
	snaps *disk.SnapshotsClient
	subscription string
	resourceGroup string
	location string
@@ -39,19 +43,104 @@ type blockStorageAdapter struct {

var _ cloudprovider.BlockStorageAdapter = &blockStorageAdapter{}

const (
	azureClientIDKey string = "AZURE_CLIENT_ID"
	azureClientSecretKey string = "AZURE_CLIENT_SECRET"
	azureSubscriptionIDKey string = "AZURE_SUBSCRIPTION_ID"
	azureTenantIDKey string = "AZURE_TENANT_ID"
	azureStorageAccountIDKey string = "AZURE_STORAGE_ACCOUNT_ID"
	azureStorageKeyKey string = "AZURE_STORAGE_KEY"
	azureResourceGroupKey string = "AZURE_RESOURCE_GROUP"
)

func getConfig() map[string]string {
	cfg := map[string]string{
		azureClientIDKey: "",
		azureClientSecretKey: "",
		azureSubscriptionIDKey: "",
		azureTenantIDKey: "",
		azureStorageAccountIDKey: "",
		azureStorageKeyKey: "",
		azureResourceGroupKey: "",
	}

	for key := range cfg {
		cfg[key] = os.Getenv(key)
	}

	return cfg
}

func NewBlockStorageAdapter(location string, apiTimeout time.Duration) (cloudprovider.BlockStorageAdapter, error) {
	if location == "" {
		return nil, errors.New("missing location in azure configuration in config file")
	}

	if apiTimeout == 0 {
		apiTimeout = time.Minute
	}

	cfg := getConfig()

	spt, err := helpers.NewServicePrincipalTokenFromCredentials(cfg, azure.PublicCloud.ResourceManagerEndpoint)
	if err != nil {
		return nil, fmt.Errorf("error creating new service principal: %v", err)
	}

	disksClient := disk.NewDisksClient(cfg[azureSubscriptionIDKey])
	snapsClient := disk.NewSnapshotsClient(cfg[azureSubscriptionIDKey])

	disksClient.Authorizer = spt
	snapsClient.Authorizer = spt

	// validate the location
	groupClient := subscriptions.NewGroupClient()
	groupClient.Authorizer = spt

	locs, err := groupClient.ListLocations(cfg[azureSubscriptionIDKey])
	if err != nil {
		return nil, err
	}

	if locs.Value == nil {
		return nil, errors.New("no locations returned from Azure API")
	}

	locationExists := false
	for _, loc := range *locs.Value {
		if (loc.Name != nil && *loc.Name == location) || (loc.DisplayName != nil && *loc.DisplayName == location) {
			locationExists = true
			break
		}
	}

	if !locationExists {
		return nil, fmt.Errorf("location %q not found", location)
	}

	return &blockStorageAdapter{
		disks: &disksClient,
		snaps: &snapsClient,
		subscription: cfg[azureSubscriptionIDKey],
		resourceGroup: cfg[azureResourceGroupKey],
		location: location,
		apiTimeout: apiTimeout,
	}, nil
}

func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID, volumeType string, iops *int64) (string, error) {
	fullSnapshotName := getFullSnapshotName(op.subscription, op.resourceGroup, snapshotID)
	diskName := "restore-" + uuid.NewV4().String()

	disk := azure.Model{
	disk := disk.Model{
		Name: &diskName,
		Location: &op.location,
		Properties: &azure.Properties{
			CreationData: &azure.CreationData{
				CreateOption: azure.Copy,
		Properties: &disk.Properties{
			CreationData: &disk.CreationData{
				CreateOption: disk.Copy,
				SourceResourceID: &fullSnapshotName,
			},
			AccountType: azure.StorageAccountTypes(volumeType),
			AccountType: disk.StorageAccountTypes(volumeType),
		},
	}
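The adapter reads its credentials from the environment via getConfig. A minimal sketch of the expected environment, checked before constructing the adapter (the wrapper below is illustrative, not part of Ark):

```go
package main

import (
	"fmt"
	"os"
)

// requiredAzureEnv mirrors the keys read by getConfig above.
var requiredAzureEnv = []string{
	"AZURE_CLIENT_ID",
	"AZURE_CLIENT_SECRET",
	"AZURE_SUBSCRIPTION_ID",
	"AZURE_TENANT_ID",
	"AZURE_STORAGE_ACCOUNT_ID",
	"AZURE_STORAGE_KEY",
	"AZURE_RESOURCE_GROUP",
}

func main() {
	// Fail early with a clear message instead of letting service principal
	// creation fail later with a less obvious error.
	for _, key := range requiredAzureEnv {
		if os.Getenv(key) == "" {
			fmt.Printf("missing required environment variable %s\n", key)
			os.Exit(1)
		}
	}
	fmt.Println("Azure environment looks complete")
}
```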
@@ -136,11 +225,11 @@ func (op *blockStorageAdapter) CreateSnapshot(volumeID string, tags map[string]s
		snapshotName = volumeID[0:80-len(suffix)] + suffix
	}

	snap := azure.Snapshot{
	snap := disk.Snapshot{
		Name: &snapshotName,
		Properties: &azure.Properties{
			CreationData: &azure.CreationData{
				CreateOption: azure.Copy,
		Properties: &disk.Properties{
			CreationData: &disk.CreationData{
				CreateOption: disk.Copy,
				SourceResourceID: &fullDiskName,
			},
		},
@@ -34,6 +34,21 @@ type objectStorageAdapter struct {

var _ cloudprovider.ObjectStorageAdapter = &objectStorageAdapter{}

func NewObjectStorageAdapter() (cloudprovider.ObjectStorageAdapter, error) {
	cfg := getConfig()

	storageClient, err := storage.NewBasicClient(cfg[azureStorageAccountIDKey], cfg[azureStorageKeyKey])
	if err != nil {
		return nil, err
	}

	blobClient := storageClient.GetBlobService()

	return &objectStorageAdapter{
		blobClient: &blobClient,
	}, nil
}

func (op *objectStorageAdapter) PutObject(bucket string, key string, body io.ReadSeeker) error {
	container, err := getContainerReference(op.blobClient, bucket)
	if err != nil {
@@ -1,130 +0,0 @@
/*
Copyright 2017 Heptio Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"errors"
	"fmt"
	"os"
	"time"

	"github.com/Azure/azure-sdk-for-go/arm/disk"
	"github.com/Azure/azure-sdk-for-go/arm/examples/helpers"
	"github.com/Azure/azure-sdk-for-go/arm/resources/subscriptions"
	"github.com/Azure/azure-sdk-for-go/storage"
	"github.com/Azure/go-autorest/autorest/azure"

	"github.com/heptio/ark/pkg/cloudprovider"
)

const (
	azureClientIDKey string = "AZURE_CLIENT_ID"
	azureClientSecretKey string = "AZURE_CLIENT_SECRET"
	azureSubscriptionIDKey string = "AZURE_SUBSCRIPTION_ID"
	azureTenantIDKey string = "AZURE_TENANT_ID"
	azureStorageAccountIDKey string = "AZURE_STORAGE_ACCOUNT_ID"
	azureStorageKeyKey string = "AZURE_STORAGE_KEY"
	azureResourceGroupKey string = "AZURE_RESOURCE_GROUP"
)

type storageAdapter struct {
	objectStorage *objectStorageAdapter
	blockStorage *blockStorageAdapter
}

var _ cloudprovider.StorageAdapter = &storageAdapter{}

func NewStorageAdapter(location string, apiTimeout time.Duration) (cloudprovider.StorageAdapter, error) {
	cfg := map[string]string{
		azureClientIDKey: "",
		azureClientSecretKey: "",
		azureSubscriptionIDKey: "",
		azureTenantIDKey: "",
		azureStorageAccountIDKey: "",
		azureStorageKeyKey: "",
		azureResourceGroupKey: "",
	}

	for key := range cfg {
		cfg[key] = os.Getenv(key)
	}

	spt, err := helpers.NewServicePrincipalTokenFromCredentials(cfg, azure.PublicCloud.ResourceManagerEndpoint)
	if err != nil {
		return nil, fmt.Errorf("error creating new service principal: %v", err)
	}

	disksClient := disk.NewDisksClient(cfg[azureSubscriptionIDKey])
	snapsClient := disk.NewSnapshotsClient(cfg[azureSubscriptionIDKey])

	disksClient.Authorizer = spt
	snapsClient.Authorizer = spt

	storageClient, _ := storage.NewBasicClient(cfg[azureStorageAccountIDKey], cfg[azureStorageKeyKey])
	blobClient := storageClient.GetBlobService()

	if apiTimeout == 0 {
		apiTimeout = time.Minute
	}

	// validate the location
	groupClient := subscriptions.NewGroupClient()
	groupClient.Authorizer = spt

	locs, err := groupClient.ListLocations(cfg[azureSubscriptionIDKey])
	if err != nil {
		return nil, err
	}

	if locs.Value == nil {
		return nil, errors.New("no locations returned from Azure API")
	}

	locationExists := false
	for _, loc := range *locs.Value {
		if (loc.Name != nil && *loc.Name == location) || (loc.DisplayName != nil && *loc.DisplayName == location) {
			locationExists = true
			break
		}
	}

	if !locationExists {
		return nil, fmt.Errorf("location %q not found", location)
	}

	return &storageAdapter{
		objectStorage: &objectStorageAdapter{
			blobClient: &blobClient,
		},
		blockStorage: &blockStorageAdapter{
			disks: &disksClient,
			snaps: &snapsClient,
			subscription: cfg[azureSubscriptionIDKey],
			resourceGroup: cfg[azureResourceGroupKey],
			location: location,
			apiTimeout: apiTimeout,
		},
	}, nil
}

func (op *storageAdapter) ObjectStorage() cloudprovider.ObjectStorageAdapter {
	return op.objectStorage
}

func (op *storageAdapter) BlockStorage() cloudprovider.BlockStorageAdapter {
	return op.blockStorage
}
@@ -17,10 +17,14 @@ limitations under the License.
package gcp

import (
	"errors"
	"fmt"
	"strings"
	"time"

	uuid "github.com/satori/go.uuid"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/compute/v0.beta"

	"k8s.io/apimachinery/pkg/util/wait"
@@ -36,6 +40,41 @@ type blockStorageAdapter struct {

var _ cloudprovider.BlockStorageAdapter = &blockStorageAdapter{}

func NewBlockStorageAdapter(project, zone string) (cloudprovider.BlockStorageAdapter, error) {
	if project == "" {
		return nil, errors.New("missing project in gcp configuration in config file")
	}
	if zone == "" {
		return nil, errors.New("missing zone in gcp configuration in config file")
	}

	client, err := google.DefaultClient(oauth2.NoContext, compute.ComputeScope)
	if err != nil {
		return nil, err
	}

	gce, err := compute.New(client)
	if err != nil {
		return nil, err
	}

	// validate project & zone
	res, err := gce.Zones.Get(project, zone).Do()
	if err != nil {
		return nil, err
	}

	if res == nil {
		return nil, fmt.Errorf("zone %q not found for project %q", project, zone)
	}

	return &blockStorageAdapter{
		gce: gce,
		project: project,
		zone: zone,
	}, nil
}

func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID string, volumeType string, iops *int64) (volumeID string, err error) {
	res, err := op.gce.Snapshots.Get(op.project, snapshotID).Do()
	if err != nil {
@@ -20,6 +20,8 @@ import (
	"io"
	"strings"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	storage "google.golang.org/api/storage/v1"

	"github.com/heptio/ark/pkg/cloudprovider"
@@ -31,6 +33,22 @@ type objectStorageAdapter struct {

var _ cloudprovider.ObjectStorageAdapter = &objectStorageAdapter{}

func NewObjectStorageAdapter() (cloudprovider.ObjectStorageAdapter, error) {
	client, err := google.DefaultClient(oauth2.NoContext, storage.DevstorageReadWriteScope)
	if err != nil {
		return nil, err
	}

	gcs, err := storage.New(client)
	if err != nil {
		return nil, err
	}

	return &objectStorageAdapter{
		gcs: gcs,
	}, nil
}

func (op *objectStorageAdapter) PutObject(bucket string, key string, body io.ReadSeeker) error {
	obj := &storage.Object{
		Name: key,
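A usage sketch under assumed conditions: Application Default Credentials are available in the environment, and the adapter package import path (not shown in the diff) is as written below.

```go
package main

import (
	"log"
	"strings"

	// Assumed import path for the GCP adapter package shown above.
	"github.com/heptio/ark/pkg/cloudprovider/gcp"
)

func main() {
	// The constructor picks up Application Default Credentials automatically.
	objectStorage, err := gcp.NewObjectStorageAdapter()
	if err != nil {
		log.Fatalf("unable to configure GCS object storage: %v", err)
	}

	if err := objectStorage.PutObject("my-ark-bucket", "backups/test/ark-backup.tar.gz", strings.NewReader("placeholder")); err != nil {
		log.Fatalf("upload failed: %v", err)
	}
}
```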
@@ -1,82 +0,0 @@
/*
Copyright 2017 Heptio Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gcp

import (
	"fmt"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/compute/v0.beta"
	"google.golang.org/api/storage/v1"

	"github.com/heptio/ark/pkg/cloudprovider"
)

type storageAdapter struct {
	blockStorage *blockStorageAdapter
	objectStorage *objectStorageAdapter
}

var _ cloudprovider.StorageAdapter = &storageAdapter{}

func NewStorageAdapter(project string, zone string) (cloudprovider.StorageAdapter, error) {
	client, err := google.DefaultClient(oauth2.NoContext, compute.ComputeScope, storage.DevstorageReadWriteScope)

	if err != nil {
		return nil, err
	}

	gce, err := compute.New(client)
	if err != nil {
		return nil, err
	}

	// validate project & zone
	res, err := gce.Zones.Get(project, zone).Do()
	if err != nil {
		return nil, err
	}

	if res == nil {
		return nil, fmt.Errorf("zone %q not found for project %q", project, zone)
	}

	gcs, err := storage.New(client)
	if err != nil {
		return nil, err
	}

	return &storageAdapter{
		objectStorage: &objectStorageAdapter{
			gcs: gcs,
		},
		blockStorage: &blockStorageAdapter{
			gce: gce,
			project: project,
			zone: zone,
		},
	}, nil
}

func (op *storageAdapter) ObjectStorage() cloudprovider.ObjectStorageAdapter {
	return op.objectStorage
}

func (op *storageAdapter) BlockStorage() cloudprovider.BlockStorageAdapter {
	return op.blockStorage
}
@@ -63,10 +63,3 @@ type BlockStorageAdapter interface {
	// DeleteSnapshot deletes the specified volume snapshot.
	DeleteSnapshot(snapshotID string) error
}

// StorageAdapter exposes object- and block-storage interfaces and associated methods
// for a given storage provider.
type StorageAdapter interface {
	ObjectStorage() ObjectStorageAdapter
	BlockStorage() BlockStorageAdapter
}
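With the StorageAdapter wrapper removed, callers depend on the two narrower interfaces directly. A hypothetical constructor-injection sketch (the `services` type is illustrative, not part of Ark):

```go
package cloudprovider

// services is a hypothetical holder showing the post-change composition:
// object storage and block storage are injected independently, and the
// block storage side may be absent when no PV provider is configured.
type services struct {
	objectStorage ObjectStorageAdapter
	blockStorage  BlockStorageAdapter // nil when PV snapshots are disabled
}

func newServices(objectStorage ObjectStorageAdapter, blockStorage BlockStorageAdapter) *services {
	return &services{
		objectStorage: objectStorage,
		blockStorage:  blockStorage,
	}
}
```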
@@ -56,7 +56,7 @@ func NewCreateCommand(f client.Factory) *cobra.Command {
type CreateOptions struct {
	Name string
	TTL time.Duration
	SnapshotVolumes bool
	SnapshotVolumes flag.OptionalBool
	IncludeNamespaces flag.StringArray
	ExcludeNamespaces flag.StringArray
	IncludeResources flag.StringArray
@@ -70,19 +70,22 @@ func NewCreateOptions() *CreateOptions {
		TTL: 24 * time.Hour,
		IncludeNamespaces: flag.NewStringArray("*"),
		Labels: flag.NewMap(),
		SnapshotVolumes: true,
		SnapshotVolumes: flag.NewOptionalBool(nil),
	}
}

func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
	flags.DurationVar(&o.TTL, "ttl", o.TTL, "how long before the backup can be garbage collected")
	flags.BoolVar(&o.SnapshotVolumes, "snapshot-volumes", o.SnapshotVolumes, "take snapshots of PersistentVolumes as part of the backup")
	flags.Var(&o.IncludeNamespaces, "include-namespaces", "namespaces to include in the backup (use '*' for all namespaces)")
	flags.Var(&o.ExcludeNamespaces, "exclude-namespaces", "namespaces to exclude from the backup")
	flags.Var(&o.IncludeResources, "include-resources", "resources to include in the backup, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources)")
	flags.Var(&o.ExcludeResources, "exclude-resources", "resources to exclude from the backup, formatted as resource.group, such as storageclasses.storage.k8s.io")
	flags.Var(&o.Labels, "labels", "labels to apply to the backup")
	flags.VarP(&o.Selector, "selector", "l", "only back up resources matching this label selector")
	f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "take snapshots of PersistentVolumes as part of the backup")
	// this allows the user to just specify "--snapshot-volumes" as shorthand for "--snapshot-volumes=true"
	// like a normal bool flag
	f.NoOptDefVal = "true"
}

func (o *CreateOptions) Validate(c *cobra.Command, args []string) error {
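The VarPF/NoOptDefVal combination is what makes the flag usable in three ways: omitted entirely, bare `--snapshot-volumes`, or an explicit `--snapshot-volumes=false`. A standalone sketch of the same pattern using pflag directly, with a throwaway tri-state value type (not Ark's flag.OptionalBool):

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/spf13/pflag"
)

// triState is a throwaway pflag.Value with the same shape as an optional bool:
// a nil pointer means the flag was never set.
type triState struct{ value *bool }

func (t *triState) String() string {
	if t.value == nil {
		return "<unset>"
	}
	return strconv.FormatBool(*t.value)
}

func (t *triState) Set(s string) error {
	parsed, err := strconv.ParseBool(s)
	if err != nil {
		return err
	}
	t.value = &parsed
	return nil
}

func (t *triState) Type() string { return "optionalBool" }

func main() {
	var snapshotVolumes triState

	flags := pflag.NewFlagSet("demo", pflag.ExitOnError)
	f := flags.VarPF(&snapshotVolumes, "snapshot-volumes", "", "take snapshots of PersistentVolumes as part of the backup")
	// Bare "--snapshot-volumes" behaves like "--snapshot-volumes=true".
	f.NoOptDefVal = "true"

	_ = flags.Parse([]string{"--snapshot-volumes"})
	fmt.Println(snapshotVolumes.String()) // true
}
```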
@@ -120,7 +123,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
			IncludedResources: o.IncludeResources,
			ExcludedResources: o.ExcludeResources,
			LabelSelector: o.Selector.LabelSelector,
			SnapshotVolumes: o.SnapshotVolumes,
			SnapshotVolumes: o.SnapshotVolumes.Value,
			TTL: metav1.Duration{Duration: o.TTL},
		},
	}
@@ -55,7 +55,7 @@ func NewCreateCommand(f client.Factory) *cobra.Command {

type CreateOptions struct {
	BackupName string
	RestoreVolumes bool
	RestoreVolumes flag.OptionalBool
	Labels flag.Map
	Namespaces flag.StringArray
	NamespaceMappings flag.Map
@@ -66,16 +66,19 @@ func NewCreateOptions() *CreateOptions {
	return &CreateOptions{
		Labels: flag.NewMap(),
		NamespaceMappings: flag.NewMap().WithEntryDelimiter(",").WithKeyValueDelimiter(":"),
		RestoreVolumes: true,
		RestoreVolumes: flag.NewOptionalBool(nil),
	}
}

func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
	flags.BoolVar(&o.RestoreVolumes, "restore-volumes", o.RestoreVolumes, "whether to restore volumes from snapshots")
	flags.Var(&o.Labels, "labels", "labels to apply to the restore")
	flags.Var(&o.Namespaces, "namespaces", "comma-separated list of namespaces to restore")
	flags.Var(&o.NamespaceMappings, "namespace-mappings", "namespace mappings from name in the backup to desired restored name in the form src1:dst1,src2:dst2,...")
	flags.VarP(&o.Selector, "selector", "l", "only restore resources matching this label selector")
	f := flags.VarPF(&o.RestoreVolumes, "restore-volumes", "", "whether to restore volumes from snapshots")
	// this allows the user to just specify "--restore-volumes" as shorthand for "--restore-volumes=true"
	// like a normal bool flag
	f.NoOptDefVal = "true"
}

func (o *CreateOptions) Validate(c *cobra.Command, args []string) error {
@@ -112,7 +115,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
			Namespaces: o.Namespaces,
			NamespaceMapping: o.NamespaceMappings.Data(),
			LabelSelector: o.Selector.LabelSelector,
			RestorePVs: o.RestoreVolumes,
			RestorePVs: o.RestoreVolumes.Value,
		},
	}
@@ -103,7 +103,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
			IncludedResources: o.BackupOptions.IncludeResources,
			ExcludedResources: o.BackupOptions.ExcludeResources,
			LabelSelector: o.BackupOptions.Selector.LabelSelector,
			SnapshotVolumes: o.BackupOptions.SnapshotVolumes,
			SnapshotVolumes: o.BackupOptions.SnapshotVolumes.Value,
			TTL: metav1.Duration{Duration: o.BackupOptions.TTL},
		},
		Schedule: o.Schedule,
@@ -18,14 +18,11 @@ package server

import (
	"context"
	"errors"
	"fmt"
	"reflect"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/golang/glog"
	"github.com/spf13/cobra"
@@ -242,112 +239,108 @@ func (s *server) watchConfig(config *api.Config) {

func (s *server) initBackupService(config *api.Config) error {
	glog.Infof("Configuring cloud provider for backup service")
	cloud, err := initCloud(config.BackupStorageProvider.CloudProviderConfig, "backupStorageProvider")
	objectStorage, err := getObjectStorageProvider(config.BackupStorageProvider.CloudProviderConfig, "backupStorageProvider")
	if err != nil {
		return err
	}
	s.backupService = cloudprovider.NewBackupService(cloud.ObjectStorage())

	s.backupService = cloudprovider.NewBackupService(objectStorage)
	return nil
}

func (s *server) initSnapshotService(config *api.Config) error {
	if config.PersistentVolumeProvider == nil {
		glog.Infof("PersistentVolumeProvider config not provided, volume snapshots and restores are disabled")
		return nil
	}

	glog.Infof("Configuring cloud provider for snapshot service")
	cloud, err := initCloud(config.PersistentVolumeProvider, "persistentVolumeProvider")
	blockStorage, err := getBlockStorageProvider(*config.PersistentVolumeProvider, "persistentVolumeProvider")
	if err != nil {
		return err
	}
	s.snapshotService = cloudprovider.NewSnapshotService(cloud.BlockStorage())
	s.snapshotService = cloudprovider.NewSnapshotService(blockStorage)
	return nil
}

func initCloud(config api.CloudProviderConfig, field string) (cloudprovider.StorageAdapter, error) {
func hasOneCloudProvider(cloudConfig api.CloudProviderConfig) bool {
	found := false

	if cloudConfig.AWS != nil {
		found = true
	}

	if cloudConfig.GCP != nil {
		if found {
			return false
		}
		found = true
	}

	if cloudConfig.Azure != nil {
		if found {
			return false
		}
		found = true
	}

	return found
}

func getObjectStorageProvider(cloudConfig api.CloudProviderConfig, field string) (cloudprovider.ObjectStorageAdapter, error) {
	var (
		cloud cloudprovider.StorageAdapter
		objectStorage cloudprovider.ObjectStorageAdapter
		err error
	)

	if config.AWS != nil {
		cloud, err = getAWSCloudProvider(config)
	if !hasOneCloudProvider(cloudConfig) {
		return nil, fmt.Errorf("you must specify exactly one of aws, gcp, or azure for %s", field)
	}

	if config.GCP != nil {
		if cloud != nil {
			return nil, fmt.Errorf("you may only specify one of aws, gcp, or azure for %s", field)
		}
		cloud, err = getGCPCloudProvider(config)
	}

	if config.Azure != nil {
		if cloud != nil {
			return nil, fmt.Errorf("you may only specify one of aws, gcp, or azure for %s", field)
		}
		cloud, err = getAzureCloudProvider(config)
	switch {
	case cloudConfig.AWS != nil:
		objectStorage, err = arkaws.NewObjectStorageAdapter(
			cloudConfig.AWS.Region,
			cloudConfig.AWS.S3Url,
			cloudConfig.AWS.KMSKeyID,
			cloudConfig.AWS.S3ForcePathStyle)
	case cloudConfig.GCP != nil:
		objectStorage, err = gcp.NewObjectStorageAdapter()
	case cloudConfig.Azure != nil:
		objectStorage, err = azure.NewObjectStorageAdapter()
	}

	if err != nil {
		return nil, err
	}

	if cloud == nil {
		return nil, fmt.Errorf("you must specify one of aws, gcp, or azure for %s", field)
	}

	return cloud, err
	return objectStorage, nil
}

func getAWSCloudProvider(cloudConfig api.CloudProviderConfig) (cloudprovider.StorageAdapter, error) {
	if cloudConfig.AWS == nil {
		return nil, errors.New("missing aws configuration in config file")
	}
	if cloudConfig.AWS.Region == "" {
		return nil, errors.New("missing region in aws configuration in config file")
	}
	if cloudConfig.AWS.AvailabilityZone == "" {
		return nil, errors.New("missing availabilityZone in aws configuration in config file")
	}

	awsConfig := aws.NewConfig().
		WithRegion(cloudConfig.AWS.Region).
		WithS3ForcePathStyle(cloudConfig.AWS.S3ForcePathStyle)

	if cloudConfig.AWS.S3Url != "" {
		awsConfig = awsConfig.WithEndpointResolver(
			endpoints.ResolverFunc(func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
				if service == endpoints.S3ServiceID {
					return endpoints.ResolvedEndpoint{
						URL: cloudConfig.AWS.S3Url,
					}, nil
				}

				return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
			}),
func getBlockStorageProvider(cloudConfig api.CloudProviderConfig, field string) (cloudprovider.BlockStorageAdapter, error) {
	var (
		blockStorage cloudprovider.BlockStorageAdapter
		err error
	)

	if !hasOneCloudProvider(cloudConfig) {
		return nil, fmt.Errorf("you must specify exactly one of aws, gcp, or azure for %s", field)
	}

	return arkaws.NewStorageAdapter(awsConfig, cloudConfig.AWS.AvailabilityZone, cloudConfig.AWS.KMSKeyID)
}
	switch {
	case cloudConfig.AWS != nil:
		blockStorage, err = arkaws.NewBlockStorageAdapter(cloudConfig.AWS.Region, cloudConfig.AWS.AvailabilityZone)
	case cloudConfig.GCP != nil:
		blockStorage, err = gcp.NewBlockStorageAdapter(cloudConfig.GCP.Project, cloudConfig.GCP.Zone)
	case cloudConfig.Azure != nil:
		blockStorage, err = azure.NewBlockStorageAdapter(cloudConfig.Azure.Location, cloudConfig.Azure.APITimeout.Duration)
	}

func getGCPCloudProvider(cloudConfig api.CloudProviderConfig) (cloudprovider.StorageAdapter, error) {
	if cloudConfig.GCP == nil {
		return nil, errors.New("missing gcp configuration in config file")
	if err != nil {
		return nil, err
	}
	if cloudConfig.GCP.Project == "" {
		return nil, errors.New("missing project in gcp configuration in config file")
	}
	if cloudConfig.GCP.Zone == "" {
		return nil, errors.New("missing zone in gcp configuration in config file")
	}
	return gcp.NewStorageAdapter(cloudConfig.GCP.Project, cloudConfig.GCP.Zone)
}

func getAzureCloudProvider(cloudConfig api.CloudProviderConfig) (cloudprovider.StorageAdapter, error) {
	if cloudConfig.Azure == nil {
		return nil, errors.New("missing azure configuration in config file")
	}
	if cloudConfig.Azure.Location == "" {
		return nil, errors.New("missing location in azure configuration in config file")
	}
	return azure.NewStorageAdapter(cloudConfig.Azure.Location, cloudConfig.Azure.APITimeout.Duration)
	return blockStorage, nil
}

func durationMin(a, b time.Duration) time.Duration {
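A small illustration of the rule enforced by hasOneCloudProvider: exactly one provider key may be set. The types below are local stand-ins mirroring the shape of api.CloudProviderConfig (the provider-specific config types are not shown in this diff), and the counting formulation is an equivalent restatement, not the code above.

```go
package main

import "fmt"

// cloudProviderConfig is a stand-in mirroring api.CloudProviderConfig's shape.
type cloudProviderConfig struct {
	AWS, GCP, Azure *struct{}
}

// hasOneCloudProvider restates the server-side rule: exactly one provider set.
func hasOneCloudProvider(cfg cloudProviderConfig) bool {
	count := 0
	for _, p := range []*struct{}{cfg.AWS, cfg.GCP, cfg.Azure} {
		if p != nil {
			count++
		}
	}
	return count == 1
}

func main() {
	one := &struct{}{}
	fmt.Println(hasOneCloudProvider(cloudProviderConfig{AWS: one}))           // true
	fmt.Println(hasOneCloudProvider(cloudProviderConfig{AWS: one, GCP: one})) // false: two providers set
	fmt.Println(hasOneCloudProvider(cloudProviderConfig{}))                   // false: none set
}
```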
@@ -408,6 +401,7 @@ func (s *server) runControllers(config *api.Config) error {
		backupper,
		s.backupService,
		config.BackupStorageProvider.Bucket,
		s.snapshotService != nil,
	)
	wg.Add(1)
	go func() {
@@ -461,6 +455,7 @@ func (s *server) runControllers(config *api.Config) error {
		s.backupService,
		config.BackupStorageProvider.Bucket,
		s.sharedInformerFactory.Ark().V1().Backups(),
		s.snapshotService != nil,
	)
	wg.Add(1)
	go func() {
@ -490,7 +485,12 @@ func newBackupper(
|
|||
actions := map[string]backup.Action{}
|
||||
|
||||
if snapshotService != nil {
|
||||
actions["persistentvolumes"] = backup.NewVolumeSnapshotAction(snapshotService)
|
||||
action, err := backup.NewVolumeSnapshotAction(snapshotService)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
actions["persistentvolumes"] = action
|
||||
}
|
||||
|
||||
return backup.NewKubernetesBackupper(
|
||||
|
|
|
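The server now only wires up the volume snapshot action and passes `s.snapshotService != nil` into the controllers, so downstream code never needs the concrete provider. A minimal sketch of that optional-dependency pattern, with invented type and constructor names (not the project's actual API):

```go
// A minimal sketch of the optional-dependency pattern used above; the types
// and constructor are invented for illustration.
package main

import "fmt"

type SnapshotService interface {
	CreateSnapshot(volumeID string) (string, error)
}

type controller struct {
	pvProviderExists bool
}

// newController only needs to know whether snapshots are possible, not how
// they are taken, so it records the boolean derived from the optional service.
func newController(snapshotService SnapshotService) *controller {
	return &controller{pvProviderExists: snapshotService != nil}
}

func main() {
	var svc SnapshotService // nil: no persistentVolumeProvider configured
	c := newController(svc)
	fmt.Println(c.pvProviderExists) // false
}
```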
@ -0,0 +1,65 @@
/*
Copyright 2017 Heptio Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flag

import (
"fmt"

"k8s.io/apimachinery/pkg/util/sets"
)

// Enum is a Cobra-compatible wrapper for defining
// a string flag that can be one of a specified set
// of values.
type Enum struct {
allowedValues sets.String
value string
}

// NewEnum returns a new enum flag with the specified list
// of allowed values. The first value specified is used
// as the default.
func NewEnum(allowedValues ...string) Enum {
return Enum{
allowedValues: sets.NewString(allowedValues...),
value: allowedValues[0],
}
}

// String returns a string representation of the
// enum flag.
func (e *Enum) String() string {
return e.value
}

// Set assigns the provided string to the enum
// receiver. It returns an error if the string
// is not an allowed value.
func (e *Enum) Set(s string) error {
if !e.allowedValues.Has(s) {
return fmt.Errorf("invalid value: %q", s)
}

e.value = s
return nil
}

// Type returns a string representation of the
// Enum type.
func (e *Enum) Type() string {
return "enum"
}
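Because `*Enum` implements pflag's `Value` interface (`String`, `Set`, `Type`), it can be registered on any Cobra command with `Flags().Var`. A small usage sketch; the package import path, flag name, and allowed values are assumptions for illustration, not taken from this diff:

```go
// Hypothetical wiring; flag name, values, and the package path are illustrative.
package main

import (
	"fmt"

	"github.com/spf13/cobra"

	"github.com/heptio/ark/pkg/cmd/util/flag" // assumed location of the flag package
)

func main() {
	output := flag.NewEnum("table", "json", "yaml") // "table" becomes the default

	cmd := &cobra.Command{
		Use: "example",
		Run: func(c *cobra.Command, args []string) {
			fmt.Println("output format:", output.String())
		},
	}

	// pflag drives Set/String/Type through the pflag.Value interface,
	// so "--output bogus" fails with the "invalid value" error above.
	cmd.Flags().Var(&output, "output", "output format (table, json, yaml)")

	_ = cmd.Execute()
}
```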
@ -0,0 +1,60 @@
/*
Copyright 2017 Heptio Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package flag

import "strconv"

type OptionalBool struct {
Value *bool
}

func NewOptionalBool(defaultValue *bool) OptionalBool {
return OptionalBool{
Value: defaultValue,
}
}

// String returns a string representation of the
// optional bool flag.
func (f *OptionalBool) String() string {
switch f.Value {
case nil:
return "<nil>"
default:
return strconv.FormatBool(*f.Value)
}
}

func (f *OptionalBool) Set(val string) error {
if val == "" {
f.Value = nil
return nil
}

parsed, err := strconv.ParseBool(val)
if err != nil {
return err
}

f.Value = &parsed

return nil
}

func (f *OptionalBool) Type() string {
return "optionalBool"
}
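`OptionalBool` keeps three states: nil (flag not given), true, and false, which is what lets the server distinguish "snapshot if possible" from an explicit request. A sketch of how it is presumably registered so that `--snapshot-volumes` with no value means true, while omitting the flag leaves it nil; the command wiring and package path here are assumptions:

```go
// A minimal sketch, assuming the flag package path shown; command and flag
// names are illustrative.
package main

import (
	"fmt"

	"github.com/spf13/cobra"

	"github.com/heptio/ark/pkg/cmd/util/flag" // assumed location of the flag package
)

func main() {
	snapshotVolumes := flag.NewOptionalBool(nil) // nil = "not specified"

	cmd := &cobra.Command{
		Use: "example",
		Run: func(c *cobra.Command, args []string) {
			switch {
			case snapshotVolumes.Value == nil:
				fmt.Println("snapshot-volumes not set (auto)")
			default:
				fmt.Println("snapshot-volumes =", *snapshotVolumes.Value)
			}
		},
	}

	cmd.Flags().Var(&snapshotVolumes, "snapshot-volumes", "take snapshots of PersistentVolumes as part of the backup")
	// NoOptDefVal is what makes a bare "--snapshot-volumes" parse as true, and
	// is presumably why the usage string renders as "optionalBool[=true]".
	cmd.Flags().Lookup("snapshot-volumes").NoOptDefVal = "true"

	_ = cmd.Execute()
}
```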
@ -52,6 +52,7 @@ type backupController struct {
backupper backup.Backupper
backupService cloudprovider.BackupService
bucket string
pvProviderExists bool

lister listers.BackupLister
listerSynced cache.InformerSynced

@ -68,11 +69,13 @@ func NewBackupController(
backupper backup.Backupper,
backupService cloudprovider.BackupService,
bucket string,
pvProviderExists bool,
) Interface {
c := &backupController{
backupper: backupper,
backupService: backupService,
bucket: bucket,
pvProviderExists: pvProviderExists,

lister: backupInformer.Lister(),
listerSynced: backupInformer.Informer().HasSynced,

@ -297,6 +300,10 @@ func (controller *backupController) getValidationErrors(itm *api.Backup) []strin
validationErrors = append(validationErrors, fmt.Sprintf("Invalid included/excluded namespace lists: %v", err))
}

if !controller.pvProviderExists && itm.Spec.SnapshotVolumes != nil && *itm.Spec.SnapshotVolumes {
validationErrors = append(validationErrors, "Server is not configured for PV snapshots")
}

return validationErrors
}
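The validation above only rejects a backup that explicitly asks for snapshots (`SnapshotVolumes` pointing at true) on a server without a PV provider; nil ("auto") and false remain valid. A standalone sketch of just that rule, with an invented helper name:

```go
// Illustrative only: mirrors the *bool semantics used by Backup.Spec.SnapshotVolumes
// (nil = auto, &true, &false). The function name is invented.
package main

import "fmt"

func validateSnapshotRequest(pvProviderExists bool, snapshotVolumes *bool) []string {
	var validationErrors []string

	// Only an explicit "true" is rejected when no PV provider is configured.
	if !pvProviderExists && snapshotVolumes != nil && *snapshotVolumes {
		validationErrors = append(validationErrors, "Server is not configured for PV snapshots")
	}

	return validationErrors
}

func main() {
	yes := true
	fmt.Println(validateSnapshotRequest(false, &yes)) // [Server is not configured for PV snapshots]
	fmt.Println(validateSnapshotRequest(false, nil))  // []
}
```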
@ -54,6 +54,7 @@ func TestProcessBackup(t *testing.T) {
expectedExcludes []string
backup *TestBackup
expectBackup bool
allowSnapshots bool
}{
{
name: "bad key",

@ -129,6 +130,20 @@ func TestProcessBackup(t *testing.T) {
expectedIncludes: []string{"*"},
expectBackup: true,
},
{
name: "backup with SnapshotVolumes when allowSnapshots=false fails validation",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithSnapshotVolumes(true),
expectBackup: false,
},
{
name: "backup with SnapshotVolumes when allowSnapshots=true gets executed",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithSnapshotVolumes(true),
allowSnapshots: true,
expectedIncludes: []string{"*"},
expectBackup: true,
},
}

// flag.Set("logtostderr", "true")

@ -150,6 +165,7 @@ func TestProcessBackup(t *testing.T) {
backupper,
cloudBackups,
"bucket",
test.allowSnapshots,
).(*backupController)
c.clock = clock.NewFakeClock(time.Now())

@ -180,6 +196,7 @@ func TestProcessBackup(t *testing.T) {
}

backup.Spec.IncludedNamespaces = expectedNSes
backup.Spec.SnapshotVolumes = test.backup.Spec.SnapshotVolumes
backup.Status.Phase = v1.BackupPhaseInProgress
backup.Status.Expiration.Time = expiration
backup.Status.Version = 1

@ -226,6 +243,7 @@ func TestProcessBackup(t *testing.T) {
WithExcludedResources(test.expectedExcludes...).
WithIncludedNamespaces(expectedNSes...).
WithTTL(test.backup.Spec.TTL.Duration).
WithSnapshotVolumesPointer(test.backup.Spec.SnapshotVolumes).
WithExpiration(expiration).
WithVersion(1).
Backup,

@ -241,6 +259,7 @@ func TestProcessBackup(t *testing.T) {
WithExcludedResources(test.expectedExcludes...).
WithIncludedNamespaces(expectedNSes...).
WithTTL(test.backup.Spec.TTL.Duration).
WithSnapshotVolumesPointer(test.backup.Spec.SnapshotVolumes).
WithExpiration(expiration).
WithVersion(1).
Backup,
@ -108,7 +108,19 @@ func (c *gcController) cleanBackups() {
// storage should happen first because otherwise there's a possibility the backup sync
// controller would re-create the API object after deletion.
for _, backup := range backups {
if backup.Status.Expiration.Time.Before(now) {
if !backup.Status.Expiration.Time.Before(now) {
glog.Infof("Backup %s/%s has not expired yet, skipping", backup.Namespace, backup.Name)
continue
}

// if the backup includes snapshots but we don't currently have a PVProvider, we don't
// want to orphan the snapshots so skip garbage-collection entirely.
if c.snapshotService == nil && len(backup.Status.VolumeBackups) > 0 {
glog.Warningf("Cannot garbage-collect backup %s/%s because backup includes snapshots and server is not configured with PersistentVolumeProvider",
backup.Namespace, backup.Name)
continue
}

glog.Infof("Removing backup %s/%s", backup.Namespace, backup.Name)
if err := c.backupService.DeleteBackup(c.bucket, backup.Name); err != nil {
glog.Errorf("error deleting backup %s/%s: %v", backup.Namespace, backup.Name, err)

@ -125,9 +137,7 @@ func (c *gcController) cleanBackups() {
if err := c.client.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
glog.Errorf("error deleting backup API object %s/%s: %v", backup.Namespace, backup.Name, err)
}
} else {
glog.Infof("Backup %s/%s has not expired yet, skipping", backup.Namespace, backup.Name)
}

}

// also GC any Backup API objects without files in object storage
@ -31,6 +31,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"

api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/generated/clientset/fake"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
. "github.com/heptio/ark/pkg/util/test"

@ -41,6 +42,7 @@ type gcTest struct {
bucket string
backups map[string][]*api.Backup
snapshots sets.String
nilSnapshotService bool

expectedBackupsRemaining map[string]sets.String
expectedSnapshotsRemaining sets.String

@ -149,11 +151,38 @@ func TestGarbageCollect(t *testing.T) {
expectedBackupsRemaining: make(map[string]sets.String),
expectedSnapshotsRemaining: sets.NewString("snapshot-3", "snapshot-4"),
},
gcTest{
name: "no snapshot service only GC's backups without snapshots",
bucket: "bucket-1",
backups: map[string][]*api.Backup{
"bucket-1": []*api.Backup{
NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Second)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
NewTestBackup().WithName("backup-2").
WithExpiration(fakeClock.Now().Add(-1 * time.Second)).
Backup,
},
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
nilSnapshotService: true,
expectedBackupsRemaining: map[string]sets.String{
"bucket-1": sets.NewString("backup-1"),
},
},
}

for _, test := range tests {
backupService := &fakeBackupService{}
snapshotService := &FakeSnapshotService{}
var (
backupService = &fakeBackupService{}
snapshotService *FakeSnapshotService
)

if !test.nilSnapshotService {
snapshotService = &FakeSnapshotService{SnapshotsTaken: test.snapshots}
}

t.Run(test.name, func(t *testing.T) {
backupService.backupsByBucket = make(map[string][]*api.Backup)

@ -167,16 +196,19 @@ func TestGarbageCollect(t *testing.T) {
backupService.backupsByBucket[bucket] = data
}

snapshotService.SnapshotsTaken = test.snapshots

var (
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
snapSvc cloudprovider.SnapshotService
)

if snapshotService != nil {
snapSvc = snapshotService
}

controller := NewGCController(
backupService,
snapshotService,
snapSvc,
test.bucket,
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),

@ -202,7 +234,9 @@ func TestGarbageCollect(t *testing.T) {
assert.Equal(t, test.expectedBackupsRemaining[bucket], backupNames)
}

if !test.nilSnapshotService {
assert.Equal(t, test.expectedSnapshotsRemaining, snapshotService.SnapshotsTaken)
}
})
}
}
@ -47,6 +47,7 @@ type restoreController struct {
restorer restore.Restorer
backupService cloudprovider.BackupService
bucket string
pvProviderExists bool

backupLister listers.BackupLister
backupListerSynced cache.InformerSynced

@ -64,6 +65,7 @@ func NewRestoreController(
backupService cloudprovider.BackupService,
bucket string,
backupInformer informers.BackupInformer,
pvProviderExists bool,
) Interface {
c := &restoreController{
restoreClient: restoreClient,

@ -71,6 +73,7 @@ func NewRestoreController(
restorer: restorer,
backupService: backupService,
bucket: bucket,
pvProviderExists: pvProviderExists,
backupLister: backupInformer.Lister(),
backupListerSynced: backupInformer.Informer().HasSynced,
restoreLister: restoreInformer.Lister(),

@ -275,6 +278,10 @@ func (controller *restoreController) getValidationErrors(itm *api.Restore) []str
validationErrors = append(validationErrors, "BackupName must be non-empty and correspond to the name of a backup in object storage.")
}

if !controller.pvProviderExists && itm.Spec.RestorePVs != nil && *itm.Spec.RestorePVs {
validationErrors = append(validationErrors, "Server is not configured for PV snapshot restores")
}

return validationErrors
}
@ -42,6 +42,7 @@ func TestProcessRestore(t *testing.T) {
restore *api.Restore
backup *api.Backup
restorerError error
allowRestoreSnapshots bool
expectedErr bool
expectedRestoreUpdates []*api.Restore
expectedRestorerCall *api.Restore

@ -137,6 +138,28 @@ func TestProcessRestore(t *testing.T) {
},
expectedRestorerCall: NewTestRestore("foo", "bar", api.RestorePhaseInProgress).WithBackup("backup-1").WithRestorableNamespace("*").Restore,
},
{
name: "valid restore with RestorePVs=true gets executed when allowRestoreSnapshots=true",
restore: NewTestRestore("foo", "bar", api.RestorePhaseNew).WithBackup("backup-1").WithRestorableNamespace("ns-1").WithRestorePVs(true).Restore,
backup: NewTestBackup().WithName("backup-1").Backup,
allowRestoreSnapshots: true,
expectedErr: false,
expectedRestoreUpdates: []*api.Restore{
NewTestRestore("foo", "bar", api.RestorePhaseInProgress).WithBackup("backup-1").WithRestorableNamespace("ns-1").WithRestorePVs(true).Restore,
NewTestRestore("foo", "bar", api.RestorePhaseCompleted).WithBackup("backup-1").WithRestorableNamespace("ns-1").WithRestorePVs(true).Restore,
},
expectedRestorerCall: NewTestRestore("foo", "bar", api.RestorePhaseInProgress).WithBackup("backup-1").WithRestorableNamespace("ns-1").WithRestorePVs(true).Restore,
},
{
name: "restore with RestorePVs=true fails validation when allowRestoreSnapshots=false",
restore: NewTestRestore("foo", "bar", api.RestorePhaseNew).WithBackup("backup-1").WithRestorableNamespace("ns-1").WithRestorePVs(true).Restore,
backup: NewTestBackup().WithName("backup-1").Backup,
expectedErr: false,
expectedRestoreUpdates: []*api.Restore{
NewTestRestore("foo", "bar", api.RestorePhaseFailedValidation).WithBackup("backup-1").WithRestorableNamespace("ns-1").WithRestorePVs(true).
WithValidationError("Server is not configured for PV snapshot restores").Restore,
},
},
}

// flag.Set("logtostderr", "true")

@ -160,6 +183,7 @@ func TestProcessRestore(t *testing.T) {
backupSvc,
"bucket",
sharedInformers.Ark().V1().Backups(),
test.allowRestoreSnapshots,
).(*restoreController)

if test.restore != nil {
@ -382,7 +382,6 @@ func TestGetBackup(t *testing.T) {
IncludedResources: []string{"foo", "bar"},
ExcludedResources: []string{"baz"},
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
SnapshotVolumes: true,
TTL: metav1.Duration{Duration: time.Duration(300)},
},
},

@ -399,7 +398,6 @@ func TestGetBackup(t *testing.T) {
IncludedResources: []string{"foo", "bar"},
ExcludedResources: []string{"baz"},
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
SnapshotVolumes: true,
TTL: metav1.Duration{Duration: time.Duration(300)},
},
},
@ -432,7 +432,10 @@ func (kr *kubernetesRestorer) restoreResourceForNamespace(
continue
}

preparedObj, err := restorer.Prepare(obj, restore, backup)
preparedObj, warning, err := restorer.Prepare(obj, restore, backup)
if warning != nil {
addToResult(&warnings, namespace, fmt.Errorf("warning preparing %s: %v", fullPath, warning))
}
if err != nil {
addToResult(&errors, namespace, fmt.Errorf("error preparing %s: %v", fullPath, err))
continue
@ -607,10 +607,10 @@ func newFakeCustomRestorer() *fakeCustomRestorer {
}
}

func (r *fakeCustomRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error) {
func (r *fakeCustomRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error, error) {
metadata, err := collections.GetMap(obj.UnstructuredContent(), "metadata")
if err != nil {
return nil, err
return nil, nil, err
}

if _, found := metadata["labels"]; !found {
@ -37,11 +37,11 @@ func (r *jobRestorer) Handles(obj runtime.Unstructured, restore *api.Restore) bo
return true
}

func (r *jobRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error) {
func (r *jobRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error, error) {
glog.V(4).Infof("resetting metadata and status")
_, err := resetMetadataAndStatus(obj, true)
if err != nil {
return nil, err
return nil, nil, err
}

glog.V(4).Infof("getting spec.selector.matchLabels")

@ -59,7 +59,7 @@ func (r *jobRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, ba
delete(templateLabels, "controller-uid")
}

return obj, nil
return obj, nil, nil
}

func (r *jobRestorer) Wait() bool {
@ -128,7 +128,7 @@ func TestJobRestorerPrepare(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
restorer := NewJobRestorer()

res, err := restorer.Prepare(test.obj, nil, nil)
res, _, err := restorer.Prepare(test.obj, nil, nil)

if assert.Equal(t, test.expectedErr, err != nil) {
assert.Equal(t, test.expectedRes, res)
@ -46,27 +46,27 @@ func (nsr *namespaceRestorer) Handles(obj runtime.Unstructured, restore *api.Res
return false
}

func (nsr *namespaceRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error) {
func (nsr *namespaceRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error, error) {
updated, err := resetMetadataAndStatus(obj, true)
if err != nil {
return nil, err
return nil, nil, err
}

metadata, err := collections.GetMap(obj.UnstructuredContent(), "metadata")
if err != nil {
return nil, err
return nil, nil, err
}

currentName, err := collections.GetString(obj.UnstructuredContent(), "metadata.name")
if err != nil {
return nil, err
return nil, nil, err
}

if newName, mapped := restore.Spec.NamespaceMapping[currentName]; mapped {
metadata["name"] = newName
}

return updated, nil
return updated, nil, nil
}

func (nsr *namespaceRestorer) Wait() bool {
@ -19,11 +19,12 @@ package restorers
import (
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/stretchr/testify/assert"

"k8s.io/apimachinery/pkg/runtime"

api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/stretchr/testify/assert"
testutil "github.com/heptio/ark/pkg/util/test"
)

func TestHandles(t *testing.T) {

@ -36,19 +37,19 @@ func TestHandles(t *testing.T) {
{
name: "restorable NS",
obj: NewTestUnstructured().WithName("ns-1").Unstructured,
restore: newTestRestore().WithRestorableNamespace("ns-1").Restore,
restore: testutil.NewDefaultTestRestore().WithRestorableNamespace("ns-1").Restore,
expect: true,
},
{
name: "non-restorable NS",
obj: NewTestUnstructured().WithName("ns-1").Unstructured,
restore: newTestRestore().WithRestorableNamespace("ns-2").Restore,
restore: testutil.NewDefaultTestRestore().WithRestorableNamespace("ns-2").Restore,
expect: false,
},
{
name: "namespace obj doesn't have name",
obj: NewTestUnstructured().WithMetadata().Unstructured,
restore: newTestRestore().WithRestorableNamespace("ns-1").Restore,
restore: testutil.NewDefaultTestRestore().WithRestorableNamespace("ns-1").Restore,
expect: false,
},
}

@ -72,27 +73,27 @@ func TestPrepare(t *testing.T) {
{
name: "standard non-mapped namespace",
obj: NewTestUnstructured().WithStatus().WithName("ns-1").Unstructured,
restore: newTestRestore().Restore,
restore: testutil.NewDefaultTestRestore().Restore,
expectedErr: false,
expectedRes: NewTestUnstructured().WithName("ns-1").Unstructured,
},
{
name: "standard mapped namespace",
obj: NewTestUnstructured().WithStatus().WithName("ns-1").Unstructured,
restore: newTestRestore().WithMappedNamespace("ns-1", "ns-2").Restore,
restore: testutil.NewDefaultTestRestore().WithMappedNamespace("ns-1", "ns-2").Restore,
expectedErr: false,
expectedRes: NewTestUnstructured().WithName("ns-2").Unstructured,
},
{
name: "object without name results in error",
obj: NewTestUnstructured().WithMetadata().WithStatus().Unstructured,
restore: newTestRestore().Restore,
restore: testutil.NewDefaultTestRestore().Restore,
expectedErr: true,
},
{
name: "annotations are kept",
obj: NewTestUnstructured().WithName("ns-1").WithAnnotations().Unstructured,
restore: newTestRestore().Restore,
restore: testutil.NewDefaultTestRestore().Restore,
expectedErr: false,
expectedRes: NewTestUnstructured().WithName("ns-1").WithAnnotations().Unstructured,
},

@ -102,7 +103,7 @@ func TestPrepare(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
restorer := NewNamespaceRestorer()

res, err := restorer.Prepare(test.obj, test.restore, nil)
res, _, err := restorer.Prepare(test.obj, test.restore, nil)

if assert.Equal(t, test.expectedErr, err != nil) {
assert.Equal(t, test.expectedRes, res)

@ -110,36 +111,3 @@ func TestPrepare(t *testing.T) {
})
}
}

type testRestore struct {
*api.Restore
}

func newTestRestore() *testRestore {
return &testRestore{
Restore: &api.Restore{
ObjectMeta: metav1.ObjectMeta{
Namespace: api.DefaultNamespace,
},
Spec: api.RestoreSpec{},
},
}
}

func (r *testRestore) WithRestorableNamespace(namespace string) *testRestore {
r.Spec.Namespaces = append(r.Spec.Namespaces, namespace)
return r
}

func (r *testRestore) WithMappedNamespace(from string, to string) *testRestore {
if r.Spec.NamespaceMapping == nil {
r.Spec.NamespaceMapping = make(map[string]string)
}
r.Spec.NamespaceMapping[from] = to
return r
}

func (r *testRestore) WithRestorePVs(restorePVs bool) *testRestore {
r.Spec.RestorePVs = restorePVs
return r
}
@ -43,17 +43,17 @@ var (
defaultTokenRegex = regexp.MustCompile("default-token-.*")
)

func (nsr *podRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error) {
func (nsr *podRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error, error) {
glog.V(4).Infof("resetting metadata and status")
_, err := resetMetadataAndStatus(obj, true)
if err != nil {
return nil, err
return nil, nil, err
}

glog.V(4).Infof("getting spec")
spec, err := collections.GetMap(obj.UnstructuredContent(), "spec")
if err != nil {
return nil, err
return nil, nil, err
}

glog.V(4).Infof("deleting spec.NodeName")

@ -79,7 +79,7 @@ func (nsr *podRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore,
return nil
})
if err != nil {
return nil, err
return nil, nil, err
}

glog.V(4).Infof("setting spec.volumes")

@ -114,10 +114,10 @@ func (nsr *podRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore,
return nil
})
if err != nil {
return nil, err
return nil, nil, err
}

return obj, nil
return obj, nil, nil
}

func (nsr *podRestorer) Wait() bool {
@ -98,7 +98,7 @@ func TestPodRestorerPrepare(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
restorer := NewPodRestorer()

res, err := restorer.Prepare(test.obj, nil, nil)
res, _, err := restorer.Prepare(test.obj, nil, nil)

if assert.Equal(t, test.expectedErr, err != nil) {
assert.Equal(t, test.expectedRes, res)
@ -25,6 +25,7 @@ import (
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/util/collections"
kubeutil "github.com/heptio/ark/pkg/util/kube"
)

type persistentVolumeRestorer struct {

@ -43,31 +44,79 @@ func (sr *persistentVolumeRestorer) Handles(obj runtime.Unstructured, restore *a
return true
}

func (sr *persistentVolumeRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error) {
func (sr *persistentVolumeRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error, error) {
if _, err := resetMetadataAndStatus(obj, false); err != nil {
return nil, err
return nil, nil, err
}

spec, err := collections.GetMap(obj.UnstructuredContent(), "spec")
if err != nil {
return nil, err
return nil, nil, err
}

delete(spec, "claimRef")
delete(spec, "storageClassName")

if restore.Spec.RestorePVs {
volumeID, err := sr.restoreVolume(obj.UnstructuredContent(), restore, backup)
pvName, err := collections.GetString(obj.UnstructuredContent(), "metadata.name")
if err != nil {
return nil, err
return nil, nil, err
}

if err := setVolumeID(spec, volumeID); err != nil {
return nil, err
// if it's an unsupported volume type for snapshot restores, we're done
if sourceType, _ := kubeutil.GetPVSource(spec); sourceType == "" {
return obj, nil, nil
}

restoreFromSnapshot := false

if restore.Spec.RestorePVs != nil && *restore.Spec.RestorePVs {
// when RestorePVs = yes, it's an error if we don't have a snapshot service
if sr.snapshotService == nil {
return nil, nil, errors.New("PV restorer is not configured for PV snapshot restores")
}

// if there are no snapshots in the backup, return without error
if backup.Status.VolumeBackups == nil {
return obj, nil, nil
}

// if there are snapshots, and this is a supported PV type, but there's no
// snapshot for this PV, it's an error
if backup.Status.VolumeBackups[pvName] == nil {
return nil, nil, fmt.Errorf("no snapshot found to restore volume %s from", pvName)
}

restoreFromSnapshot = true
}
if restore.Spec.RestorePVs == nil && sr.snapshotService != nil {
// when RestorePVs = Auto, don't error if the backup doesn't have snapshots
if backup.Status.VolumeBackups == nil || backup.Status.VolumeBackups[pvName] == nil {
return obj, nil, nil
}

restoreFromSnapshot = true
}

if restoreFromSnapshot {
backupInfo := backup.Status.VolumeBackups[pvName]

volumeID, err := sr.snapshotService.CreateVolumeFromSnapshot(backupInfo.SnapshotID, backupInfo.Type, backupInfo.Iops)
if err != nil {
return nil, nil, err
}

if err := kubeutil.SetVolumeID(spec, volumeID); err != nil {
return nil, nil, err
}
}

return obj, nil
var warning error

if sr.snapshotService == nil && len(backup.Status.VolumeBackups) > 0 {
warning = errors.New("unable to restore PV snapshots: Ark server is not configured with a PersistentVolumeProvider")
}

return obj, warning, nil
}

func (sr *persistentVolumeRestorer) Wait() bool {

@ -79,39 +128,3 @@ func (sr *persistentVolumeRestorer) Ready(obj runtime.Unstructured) bool {

return err == nil && phase == "Available"
}

func setVolumeID(spec map[string]interface{}, volumeID string) error {
if pvSource, found := spec["awsElasticBlockStore"]; found {
pvSourceObj := pvSource.(map[string]interface{})
pvSourceObj["volumeID"] = volumeID
return nil
} else if pvSource, found := spec["gcePersistentDisk"]; found {
pvSourceObj := pvSource.(map[string]interface{})
pvSourceObj["pdName"] = volumeID
return nil
} else if pvSource, found := spec["azureDisk"]; found {
pvSourceObj := pvSource.(map[string]interface{})
pvSourceObj["diskName"] = volumeID
return nil
}

return errors.New("persistent volume source is not compatible")
}

func (sr *persistentVolumeRestorer) restoreVolume(item map[string]interface{}, restore *api.Restore, backup *api.Backup) (string, error) {
pvName, err := collections.GetString(item, "metadata.name")
if err != nil {
return "", err
}

if backup.Status.VolumeBackups == nil {
return "", fmt.Errorf("VolumeBackups map not found for persistent volume %s", pvName)
}

backupInfo, found := backup.Status.VolumeBackups[pvName]
if !found {
return "", fmt.Errorf("BackupInfo not found for PersistentVolume %s", pvName)
}

return sr.snapshotService.CreateVolumeFromSnapshot(backupInfo.SnapshotID, backupInfo.Type, backupInfo.Iops)
}
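The Prepare logic above is a tri-state decision on `Restore.Spec.RestorePVs` (a *bool): explicit true requires a configured snapshot service and, if the backup recorded any snapshots, one for this PV; nil means "auto" and restores from a snapshot only when one exists; explicit false (or a missing snapshot service) skips the restore, emitting a warning when snapshots would be left unrestored. A condensed, illustrative sketch of that decision with invented names (it omits the unsupported-PV-source early return and the actual spec mutation via kubeutil.SetVolumeID):

```go
// Illustrative only; parameter and function names are invented.
package main

import (
	"errors"
	"fmt"
)

func decideRestore(restorePVs *bool, haveSnapshotService, backupHasSnapshots, haveSnapshotForPV bool) (restoreFromSnapshot bool, warning, err error) {
	if restorePVs != nil && *restorePVs {
		// Explicit "true": a snapshot service is required, and a snapshot for this
		// PV is required if the backup recorded any snapshots at all.
		if !haveSnapshotService {
			return false, nil, errors.New("PV restorer is not configured for PV snapshot restores")
		}
		if !backupHasSnapshots {
			return false, nil, nil
		}
		if !haveSnapshotForPV {
			return false, nil, errors.New("no snapshot found to restore volume from")
		}
		return true, nil, nil
	}

	if restorePVs == nil && haveSnapshotService {
		// "Auto": restore from a snapshot only when one exists for this PV.
		return backupHasSnapshots && haveSnapshotForPV, nil, nil
	}

	// Explicit "false", or no snapshot service configured.
	if !haveSnapshotService && backupHasSnapshots {
		warning = errors.New("unable to restore PV snapshots: server has no PersistentVolumeProvider")
	}
	return false, warning, nil
}

func main() {
	t := true
	fmt.Println(decideRestore(&t, true, true, true))   // true <nil> <nil>
	fmt.Println(decideRestore(nil, false, true, true))  // false, warning, <nil>
}
```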
@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"

api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
. "github.com/heptio/ark/pkg/util/test"
)

@ -37,36 +38,45 @@ func TestPVRestorerPrepare(t *testing.T) {
restore *api.Restore
backup *api.Backup
volumeMap map[api.VolumeBackupInfo]string
noSnapshotService bool
expectedWarn bool
expectedErr bool
expectedRes runtime.Unstructured
}{
{
name: "no name should error",
obj: NewTestUnstructured().WithMetadata().Unstructured,
restore: newTestRestore().Restore,
restore: NewDefaultTestRestore().Restore,
expectedErr: true,
},
{
name: "no spec should error",
obj: NewTestUnstructured().WithName("pv-1").Unstructured,
restore: newTestRestore().Restore,
restore: NewDefaultTestRestore().Restore,
expectedErr: true,
},
{
name: "when RestorePVs=false, should not error if there is no PV->BackupInfo map",
obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured,
restore: newTestRestore().WithRestorePVs(false).Restore,
restore: NewDefaultTestRestore().WithRestorePVs(false).Restore,
backup: &api.Backup{Status: api.BackupStatus{}},
expectedErr: false,
expectedRes: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured,
},
{
name: "when RestorePVs=true, error if there is no PV->BackupInfo map",
obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured,
restore: newTestRestore().WithRestorePVs(true).Restore,
name: "when RestorePVs=true, return without error if there is no PV->BackupInfo map",
obj: NewTestUnstructured().WithName("pv-1").WithSpecField("awsElasticBlockStore", make(map[string]interface{})).Unstructured,
restore: NewDefaultTestRestore().WithRestorePVs(true).Restore,
backup: &api.Backup{Status: api.BackupStatus{}},
expectedErr: false,
expectedRes: NewTestUnstructured().WithName("pv-1").WithSpecField("awsElasticBlockStore", make(map[string]interface{})).Unstructured,
},
{
name: "when RestorePVs=true, error if there is PV->BackupInfo map but no entry for this PV",
obj: NewTestUnstructured().WithName("pv-1").WithSpecField("awsElasticBlockStore", make(map[string]interface{})).Unstructured,
restore: NewDefaultTestRestore().WithRestorePVs(true).Restore,
backup: &api.Backup{Status: api.BackupStatus{VolumeBackups: map[string]*api.VolumeBackupInfo{"another-pv": &api.VolumeBackupInfo{}}}},
expectedErr: true,
expectedRes: nil,
},
{
name: "claimRef and storageClassName (only) should be cleared from spec",

@ -76,7 +86,7 @@ func TestPVRestorerPrepare(t *testing.T) {
WithSpecField("storageClassName", "foo").
WithSpecField("foo", "bar").
Unstructured,
restore: newTestRestore().WithRestorePVs(false).Restore,
restore: NewDefaultTestRestore().WithRestorePVs(false).Restore,
expectedErr: false,
expectedRes: NewTestUnstructured().
WithName("pv-1").

@ -86,7 +96,7 @@ func TestPVRestorerPrepare(t *testing.T) {
{
name: "when RestorePVs=true, AWS volume ID should be set correctly",
obj: NewTestUnstructured().WithName("pv-1").WithSpecField("awsElasticBlockStore", make(map[string]interface{})).Unstructured,
restore: newTestRestore().WithRestorePVs(true).Restore,
restore: NewDefaultTestRestore().WithRestorePVs(true).Restore,
backup: &api.Backup{Status: api.BackupStatus{VolumeBackups: map[string]*api.VolumeBackupInfo{"pv-1": &api.VolumeBackupInfo{SnapshotID: "snap-1"}}}},
volumeMap: map[api.VolumeBackupInfo]string{api.VolumeBackupInfo{SnapshotID: "snap-1"}: "volume-1"},
expectedErr: false,

@ -95,7 +105,7 @@ func TestPVRestorerPrepare(t *testing.T) {
{
name: "when RestorePVs=true, GCE pdName should be set correctly",
obj: NewTestUnstructured().WithName("pv-1").WithSpecField("gcePersistentDisk", make(map[string]interface{})).Unstructured,
restore: newTestRestore().WithRestorePVs(true).Restore,
restore: NewDefaultTestRestore().WithRestorePVs(true).Restore,
backup: &api.Backup{Status: api.BackupStatus{VolumeBackups: map[string]*api.VolumeBackupInfo{"pv-1": &api.VolumeBackupInfo{SnapshotID: "snap-1"}}}},
volumeMap: map[api.VolumeBackupInfo]string{api.VolumeBackupInfo{SnapshotID: "snap-1"}: "volume-1"},
expectedErr: false,

@ -104,37 +114,54 @@ func TestPVRestorerPrepare(t *testing.T) {
{
name: "when RestorePVs=true, Azure pdName should be set correctly",
obj: NewTestUnstructured().WithName("pv-1").WithSpecField("azureDisk", make(map[string]interface{})).Unstructured,
restore: newTestRestore().WithRestorePVs(true).Restore,
restore: NewDefaultTestRestore().WithRestorePVs(true).Restore,
backup: &api.Backup{Status: api.BackupStatus{VolumeBackups: map[string]*api.VolumeBackupInfo{"pv-1": &api.VolumeBackupInfo{SnapshotID: "snap-1"}}}},
volumeMap: map[api.VolumeBackupInfo]string{api.VolumeBackupInfo{SnapshotID: "snap-1"}: "volume-1"},
expectedErr: false,
expectedRes: NewTestUnstructured().WithName("pv-1").WithSpecField("azureDisk", map[string]interface{}{"diskName": "volume-1"}).Unstructured,
},
{
name: "when RestorePVs=true, unsupported PV source should cause error",
name: "when RestorePVs=true, unsupported PV source should not get snapshot restored",
obj: NewTestUnstructured().WithName("pv-1").WithSpecField("unsupportedPVSource", make(map[string]interface{})).Unstructured,
restore: newTestRestore().WithRestorePVs(true).Restore,
restore: NewDefaultTestRestore().WithRestorePVs(true).Restore,
backup: &api.Backup{Status: api.BackupStatus{VolumeBackups: map[string]*api.VolumeBackupInfo{"pv-1": &api.VolumeBackupInfo{SnapshotID: "snap-1"}}}},
volumeMap: map[api.VolumeBackupInfo]string{api.VolumeBackupInfo{SnapshotID: "snap-1"}: "volume-1"},
expectedErr: true,
expectedErr: false,
expectedRes: NewTestUnstructured().WithName("pv-1").WithSpecField("unsupportedPVSource", make(map[string]interface{})).Unstructured,
},
{
name: "volume type and IOPS are correctly passed to CreateVolume",
obj: NewTestUnstructured().WithName("pv-1").WithSpecField("awsElasticBlockStore", make(map[string]interface{})).Unstructured,
restore: newTestRestore().WithRestorePVs(true).Restore,
restore: NewDefaultTestRestore().WithRestorePVs(true).Restore,
backup: &api.Backup{Status: api.BackupStatus{VolumeBackups: map[string]*api.VolumeBackupInfo{"pv-1": &api.VolumeBackupInfo{SnapshotID: "snap-1", Type: "gp", Iops: &iops}}}},
volumeMap: map[api.VolumeBackupInfo]string{api.VolumeBackupInfo{SnapshotID: "snap-1", Type: "gp", Iops: &iops}: "volume-1"},
expectedErr: false,
expectedRes: NewTestUnstructured().WithName("pv-1").WithSpecField("awsElasticBlockStore", map[string]interface{}{"volumeID": "volume-1"}).Unstructured,
},
{
name: "When no SnapshotService, warn if backup has snapshots that will not be restored",
obj: NewTestUnstructured().WithName("pv-1").WithSpecField("awsElasticBlockStore", make(map[string]interface{})).Unstructured,
restore: NewDefaultTestRestore().Restore,
backup: &api.Backup{Status: api.BackupStatus{VolumeBackups: map[string]*api.VolumeBackupInfo{"pv-1": &api.VolumeBackupInfo{SnapshotID: "snap-1"}}}},
volumeMap: map[api.VolumeBackupInfo]string{api.VolumeBackupInfo{SnapshotID: "snap-1"}: "volume-1"},
noSnapshotService: true,
expectedErr: false,
expectedWarn: true,
expectedRes: NewTestUnstructured().WithName("pv-1").WithSpecField("awsElasticBlockStore", make(map[string]interface{})).Unstructured,
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
snapService := &FakeSnapshotService{RestorableVolumes: test.volumeMap}
restorer := NewPersistentVolumeRestorer(snapService)
var snapshotService cloudprovider.SnapshotService
if !test.noSnapshotService {
snapshotService = &FakeSnapshotService{RestorableVolumes: test.volumeMap}
}
restorer := NewPersistentVolumeRestorer(snapshotService)

res, err := restorer.Prepare(test.obj, test.restore, test.backup)
res, warn, err := restorer.Prepare(test.obj, test.restore, test.backup)

assert.Equal(t, test.expectedWarn, warn != nil)

if assert.Equal(t, test.expectedErr, err != nil) {
assert.Equal(t, test.expectedRes, res)
@ -35,8 +35,10 @@ func (sr *persistentVolumeClaimRestorer) Handles(obj runtime.Unstructured, resto
return true
}

func (sr *persistentVolumeClaimRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error) {
return resetMetadataAndStatus(obj, true)
func (sr *persistentVolumeClaimRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error, error) {
res, err := resetMetadataAndStatus(obj, true)

return res, nil, err
}

func (sr *persistentVolumeClaimRestorer) Wait() bool {
@ -29,8 +29,8 @@ type ResourceRestorer interface {
// Handles returns true if the Restorer should restore this object.
Handles(obj runtime.Unstructured, restore *api.Restore) bool

// Prepare gets an item ready to be restored
Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error)
// Prepare gets an item ready to be restored.
Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (res runtime.Unstructured, warning error, err error)

// Wait returns true if restoration should wait for all of this restorer's resources to be ready before moving on to the next restorer.
Wait() bool

@ -66,8 +66,10 @@ func (br *basicRestorer) Handles(obj runtime.Unstructured, restore *api.Restore)
return true
}

func (br *basicRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error) {
return resetMetadataAndStatus(obj, br.saveAnnotations)
func (br *basicRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error, error) {
obj, err := resetMetadataAndStatus(obj, br.saveAnnotations)

return obj, err, nil
}

func (br *basicRestorer) Wait() bool {
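Every ResourceRestorer now returns three values from Prepare: the prepared object, a non-fatal warning, and a fatal error. A minimal, purely illustrative restorer against the updated contract (the type is invented, and only the interface methods visible in this hunk are shown; the real interface may declare more):

```go
// Illustrative only: a no-op restorer showing the updated Prepare contract.
package restorers

import (
	"k8s.io/apimachinery/pkg/runtime"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
)

type noopRestorer struct{}

func (r *noopRestorer) Handles(obj runtime.Unstructured, restore *api.Restore) bool {
	return true
}

// Prepare returns the object unchanged; a non-fatal condition would be
// reported via the second (warning) return value, a fatal one via the third.
func (r *noopRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error, error) {
	return obj, nil, nil
}

func (r *noopRestorer) Wait() bool {
	return false
}
```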
@ -35,21 +35,21 @@ func (sr *serviceRestorer) Handles(obj runtime.Unstructured, restore *api.Restor
return true
}

func (sr *serviceRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error) {
func (sr *serviceRestorer) Prepare(obj runtime.Unstructured, restore *api.Restore, backup *api.Backup) (runtime.Unstructured, error, error) {
if _, err := resetMetadataAndStatus(obj, true); err != nil {
return nil, err
return nil, nil, err
}

spec, err := collections.GetMap(obj.UnstructuredContent(), "spec")
if err != nil {
return nil, err
return nil, nil, err
}

delete(spec, "clusterIP")

ports, err := collections.GetSlice(obj.UnstructuredContent(), "spec.ports")
if err != nil {
return nil, err
return nil, nil, err
}

for _, port := range ports {

@ -57,7 +57,7 @@ func (sr *serviceRestorer) Prepare(obj runtime.Unstructured, restore *api.Restor
delete(p, "nodePort")
}

return obj, nil
return obj, nil, nil
}

func (sr *serviceRestorer) Wait() bool {
@ -62,7 +62,7 @@ func TestServiceRestorerPrepare(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
restorer := NewServiceRestorer()

res, err := restorer.Prepare(test.obj, nil, nil)
res, _, err := restorer.Prepare(test.obj, nil, nil)

if assert.Equal(t, test.expectedErr, err != nil) {
assert.Equal(t, test.expectedRes, res)
@ -17,9 +17,14 @@ limitations under the License.
package kube

import (
"errors"
"regexp"

apierrors "k8s.io/apimachinery/pkg/api/errors"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/pkg/api/v1"

"github.com/heptio/ark/pkg/util/collections"
)

// EnsureNamespaceExists attempts to create the provided Kubernetes namespace. It returns two values:

@ -35,3 +40,67 @@ func EnsureNamespaceExists(namespace *v1.Namespace, client corev1.NamespaceInter
return false, err
}
}

var ebsVolumeIDRegex = regexp.MustCompile("vol-.*")

var supportedVolumeTypes = map[string]string{
"awsElasticBlockStore": "volumeID",
"gcePersistentDisk": "pdName",
"azureDisk": "diskName",
}

// GetVolumeID looks for a supported PV source within the provided PV unstructured
// data. It returns the appropriate volume ID field if found. If the PV source
// is supported but a volume ID cannot be found, an error is returned; if the PV
// source is not supported, zero values are returned.
func GetVolumeID(pv map[string]interface{}) (string, error) {
spec, err := collections.GetMap(pv, "spec")
if err != nil {
return "", err
}

for volumeType, volumeIDKey := range supportedVolumeTypes {
if pvSource, err := collections.GetMap(spec, volumeType); err == nil {
volumeID, err := collections.GetString(pvSource, volumeIDKey)
if err != nil {
return "", err
}

if volumeType == "awsElasticBlockStore" {
return ebsVolumeIDRegex.FindString(volumeID), nil
}

return volumeID, nil
}
}

return "", nil
}

// GetPVSource looks for a supported PV source within the provided PV spec data.
// It returns the name of the PV source type and the unstructured source data if
// one is found, or zero values otherwise.
func GetPVSource(spec map[string]interface{}) (string, map[string]interface{}) {
for volumeType := range supportedVolumeTypes {
if pvSource, found := spec[volumeType]; found {
return volumeType, pvSource.(map[string]interface{})
}
}

return "", nil
}

// SetVolumeID looks for a supported PV source within the provided PV spec data.
// It sets the appropriate ID field within the source if found, and returns an
// error if a supported PV source is not found.
func SetVolumeID(spec map[string]interface{}, volumeID string) error {
sourceType, source := GetPVSource(spec)

if sourceType == "" {
return errors.New("persistent volume source is not compatible")
}

source[supportedVolumeTypes[sourceType]] = volumeID

return nil
}
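A small usage sketch of the helpers above, using the import path already seen in the PV restorer (`github.com/heptio/ark/pkg/util/kube`); the spec map contents and volume ID are illustrative:

```go
// Usage sketch for GetPVSource, SetVolumeID, and GetVolumeID; values are examples.
package main

import (
	"fmt"

	kubeutil "github.com/heptio/ark/pkg/util/kube"
)

func main() {
	// An unstructured PV spec with an AWS EBS source and no volume ID yet.
	spec := map[string]interface{}{
		"awsElasticBlockStore": map[string]interface{}{},
	}

	// Point the PV at the volume created from the snapshot.
	if err := kubeutil.SetVolumeID(spec, "vol-0123456789abcdef0"); err != nil {
		panic(err)
	}

	sourceType, source := kubeutil.GetPVSource(spec)
	fmt.Println(sourceType, source["volumeID"]) // awsElasticBlockStore vol-0123456789abcdef0

	// GetVolumeID works on the whole PV object (it looks under "spec").
	id, err := kubeutil.GetVolumeID(map[string]interface{}{"spec": spec})
	fmt.Println(id, err) // vol-0123456789abcdef0 <nil>
}
```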
@ -104,3 +104,13 @@ func (b *TestBackup) WithSnapshot(pv string, snapshot string) *TestBackup {
b.Status.VolumeBackups[pv] = &v1.VolumeBackupInfo{SnapshotID: snapshot}
return b
}

func (b *TestBackup) WithSnapshotVolumes(value bool) *TestBackup {
b.Spec.SnapshotVolumes = &value
return b
}

func (b *TestBackup) WithSnapshotVolumesPointer(value *bool) *TestBackup {
b.Spec.SnapshotVolumes = value
return b
}
@ -41,6 +41,10 @@ func NewTestRestore(ns, name string, phase api.RestorePhase) *TestRestore {
}
}

func NewDefaultTestRestore() *TestRestore {
return NewTestRestore(api.DefaultNamespace, "", api.RestorePhase(""))
}

func (r *TestRestore) WithRestorableNamespace(name string) *TestRestore {
r.Spec.Namespaces = append(r.Spec.Namespaces, name)
return r

@ -60,3 +64,16 @@ func (r *TestRestore) WithErrors(e api.RestoreResult) *TestRestore {
r.Status.Errors = e
return r
}

func (r *TestRestore) WithRestorePVs(value bool) *TestRestore {
r.Spec.RestorePVs = &value
return r
}

func (r *TestRestore) WithMappedNamespace(from string, to string) *TestRestore {
if r.Spec.NamespaceMapping == nil {
r.Spec.NamespaceMapping = make(map[string]string)
}
r.Spec.NamespaceMapping[from] = to
return r
}