GCP: copy tags from volume to snapshot, and snapshot to volume
Signed-off-by: Steve Kriss <steve@heptio.com>
pull/341/head
parent 9673e9d158
commit cab904570f
@@ -23,6 +23,7 @@ import (
 	"github.com/pkg/errors"
 	uuid "github.com/satori/go.uuid"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
 	"google.golang.org/api/compute/v1"
@@ -38,10 +39,11 @@ const projectKey = "project"
 type blockStore struct {
 	gce     *compute.Service
 	project string
+	log     logrus.FieldLogger
 }
 
-func NewBlockStore() cloudprovider.BlockStore {
-	return &blockStore{}
+func NewBlockStore(log logrus.FieldLogger) cloudprovider.BlockStore {
+	return &blockStore{log: log}
 }
 
 func (b *blockStore) Init(config map[string]string) error {
@@ -99,15 +101,22 @@ func extractProjectFromCreds() (string, error) {
 }
 
 func (b *blockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error) {
+	// get the snapshot so we can apply its tags to the volume
 	res, err := b.gce.Snapshots.Get(b.project, snapshotID).Do()
 	if err != nil {
 		return "", errors.WithStack(err)
 	}
 
+	// Kubernetes uses the description field of GCP disks to store a JSON doc containing
+	// tags.
+	//
+	// use the snapshot's description (which contains tags from the snapshotted disk
+	// plus Ark-specific tags) to set the new disk's description.
 	disk := &compute.Disk{
 		Name:           "restore-" + uuid.NewV4().String(),
 		SourceSnapshot: res.SelfLink,
 		Type:           volumeType,
+		Description:    res.Description,
 	}
 
 	if _, err = b.gce.Disks.Insert(b.project, volumeAZ, disk).Do(); err != nil {
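
The comments added above document the convention this change relies on: the description field of a GCP disk or snapshot is treated as a JSON document of tags, so copying res.Description onto the restored disk carries the snapshotted disk's Kubernetes tags plus the Ark-specific tags across the restore. A minimal sketch of that convention follows; the key names are hypothetical, not taken from this commit.

// Sketch only: decodes a description string shaped the way the block store expects.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// hypothetical description value; real keys come from Kubernetes and Ark
	description := `{"created-for-pv":"pvc-1234","ark-backup":"nightly"}`

	var tags map[string]string
	if err := json.Unmarshal([]byte(description), &tags); err != nil {
		fmt.Println("description is not a JSON tag doc:", err)
		return
	}
	fmt.Println(tags) // the decoded tag map, ready to be copied or merged
}
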
@@ -148,12 +157,17 @@ func (b *blockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]s
 		snapshotName = volumeID[0:63-len(suffix)] + suffix
 	}
 
-	gceSnap := compute.Snapshot{
-		Name:   snapshotName,
-		Labels: tags,
+	disk, err := b.gce.Disks.Get(b.project, volumeAZ, volumeID).Do()
+	if err != nil {
+		return "", errors.WithStack(err)
 	}
 
-	_, err := b.gce.Disks.CreateSnapshot(b.project, volumeAZ, volumeID, &gceSnap).Do()
+	gceSnap := compute.Snapshot{
+		Name:        snapshotName,
+		Description: getSnapshotTags(tags, disk.Description, b.log),
+	}
+
+	_, err = b.gce.Disks.CreateSnapshot(b.project, volumeAZ, volumeID, &gceSnap).Do()
 	if err != nil {
 		return "", errors.WithStack(err)
 	}
@@ -161,6 +175,40 @@ func (b *blockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]s
 	return gceSnap.Name, nil
 }
 
+func getSnapshotTags(arkTags map[string]string, diskDescription string, log logrus.FieldLogger) string {
+	// Kubernetes uses the description field of GCP disks to store a JSON doc containing
+	// tags.
+	//
+	// use the tags in the disk's description (if a valid JSON doc) plus the tags arg
+	// to set the snapshot's description.
+	var snapshotTags map[string]string
+	if err := json.Unmarshal([]byte(diskDescription), &snapshotTags); err != nil {
+		// error decoding the disk's description, so just use the Ark-assigned tags
+		log.WithError(err).
+			Error("unable to decode disk's description as JSON, so only applying Ark-assigned tags to snapshot")
+		snapshotTags = arkTags
+	} else {
+		// merge Ark-assigned tags with the disk's tags (note that we want current
+		// Ark-assigned tags to overwrite any older versions of them that may exist
+		// due to prior snapshots/restores)
+		for k, v := range arkTags {
+			snapshotTags[k] = v
+		}
+	}
+
+	if len(snapshotTags) == 0 {
+		return ""
+	}
+
+	tagsJSON, err := json.Marshal(snapshotTags)
+	if err != nil {
+		log.WithError(err).Error("unable to encode snapshot's tags to JSON, so not tagging snapshot")
+		return ""
+	}
+
+	return string(tagsJSON)
+}
+
 func (b *blockStore) DeleteSnapshot(snapshotID string) error {
 	_, err := b.gce.Snapshots.Delete(b.project, snapshotID).Do()
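
getSnapshotTags is unexported, so the sketch below re-implements its merge semantics for illustration only: tags already present in the disk's description are kept, and Ark-assigned tags win on overlap. The input values mirror the "when tags overlap, ark tags take precedence" test case added below; everything else is an assumption of this sketch, not part of the commit.

package main

import (
	"encoding/json"
	"fmt"
)

// mergeTags mimics the behavior of getSnapshotTags: start from the disk's
// description (ignoring it if it isn't valid JSON), then overlay the Ark tags.
func mergeTags(arkTags map[string]string, diskDescription string) string {
	merged := map[string]string{}
	_ = json.Unmarshal([]byte(diskDescription), &merged) // bad JSON leaves merged empty
	for k, v := range arkTags {
		merged[k] = v // Ark tags overwrite older copies from prior snapshots/restores
	}
	if len(merged) == 0 {
		return ""
	}
	out, _ := json.Marshal(merged)
	return string(out)
}

func main() {
	fmt.Println(mergeTags(
		map[string]string{"ark-key": "ark-val", "overlapping-key": "ark-val"},
		`{"aws-key":"aws-val","overlapping-key":"aws-val"}`,
	))
	// prints {"ark-key":"ark-val","aws-key":"aws-val","overlapping-key":"ark-val"}
}
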
@@ -17,9 +17,12 @@ limitations under the License.
 package gcp
 
 import (
+	"encoding/json"
 	"testing"
 
 	"github.com/heptio/ark/pkg/util/collections"
+	arktest "github.com/heptio/ark/pkg/util/test"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -72,3 +75,77 @@ func TestSetVolumeID(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, "123abc", actual)
 }
+
+func TestGetSnapshotTags(t *testing.T) {
+	tests := []struct {
+		name            string
+		arkTags         map[string]string
+		diskDescription string
+		expected        string
+	}{
+		{
+			name:            "degenerate case (no tags)",
+			arkTags:         nil,
+			diskDescription: "",
+			expected:        "",
+		},
+		{
+			name: "ark tags only get applied",
+			arkTags: map[string]string{
+				"ark-key1": "ark-val1",
+				"ark-key2": "ark-val2",
+			},
+			diskDescription: "",
+			expected:        `{"ark-key1":"ark-val1","ark-key2":"ark-val2"}`,
+		},
+		{
+			name:            "disk tags only get applied",
+			arkTags:         nil,
+			diskDescription: `{"aws-key1":"aws-val1","aws-key2":"aws-val2"}`,
+			expected:        `{"aws-key1":"aws-val1","aws-key2":"aws-val2"}`,
+		},
+		{
+			name:            "non-overlapping ark and disk tags both get applied",
+			arkTags:         map[string]string{"ark-key": "ark-val"},
+			diskDescription: `{"aws-key":"aws-val"}`,
+			expected:        `{"ark-key":"ark-val","aws-key":"aws-val"}`,
+		},
+		{
+			name: "when tags overlap, ark tags take precedence",
+			arkTags: map[string]string{
+				"ark-key":         "ark-val",
+				"overlapping-key": "ark-val",
+			},
+			diskDescription: `{"aws-key":"aws-val","overlapping-key":"aws-val"}`,
+			expected:        `{"ark-key":"ark-val","aws-key":"aws-val","overlapping-key":"ark-val"}`,
+		},
+		{
+			name:            "if disk description is invalid JSON, apply just ark tags",
+			arkTags:         map[string]string{"ark-key": "ark-val"},
+			diskDescription: `THIS IS INVALID JSON`,
+			expected:        `{"ark-key":"ark-val"}`,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			res := getSnapshotTags(test.arkTags, test.diskDescription, arktest.NewLogger())
+
+			if test.expected == "" {
+				assert.Equal(t, test.expected, res)
+				return
+			}
+
+			var actualMap map[string]interface{}
+			require.NoError(t, json.Unmarshal([]byte(res), &actualMap))
+
+			var expectedMap map[string]interface{}
+			require.NoError(t, json.Unmarshal([]byte(test.expected), &expectedMap))
+
+			assert.Equal(t, len(expectedMap), len(actualMap))
+			for k, v := range expectedMap {
+				assert.Equal(t, v, actualMap[k])
+			}
+		})
+	}
+}
@@ -41,7 +41,7 @@ func NewCommand() *cobra.Command {
 
 	blockStores := map[string]cloudprovider.BlockStore{
 		"aws":   aws.NewBlockStore(),
-		"gcp":   gcp.NewBlockStore(),
+		"gcp":   gcp.NewBlockStore(logger),
 		"azure": azure.NewBlockStore(),
 	}
 
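
With this change, any caller of the GCP block store has to supply a logrus.FieldLogger, as the registration above now does with the server's logger. A hypothetical standalone caller might look like the following; the import path is assumed from the repository layout rather than shown in this diff, and *logrus.Logger satisfies logrus.FieldLogger.

package main

import (
	// assumed package path; the diff does not show file locations
	"github.com/heptio/ark/pkg/cloudprovider/gcp"

	"github.com/sirupsen/logrus"
)

func main() {
	// *logrus.Logger implements logrus.FieldLogger, so a plain logger is enough here
	blockStore := gcp.NewBlockStore(logrus.New())

	// Init and the other cloudprovider.BlockStore methods are unchanged by this commit
	_ = blockStore
}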