add restic integration for doing pod volume backups/restores

Signed-off-by: Steve Kriss <steve@heptio.com>
pull/508/head
Steve Kriss 2018-02-27 17:35:35 -08:00
parent c2c5b9040c
commit 50d4084fac
86 changed files with 5421 additions and 485 deletions

33
Dockerfile-ark.alpine Normal file
View File

@ -0,0 +1,33 @@
# Copyright 2017 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM alpine:3.7
MAINTAINER Andy Goldstein <andy@heptio.com>
RUN apk add --no-cache ca-certificates
ADD /bin/linux/amd64/ark /ark
RUN apk add --update --no-cache bzip2 && \
wget --quiet https://github.com/restic/restic/releases/download/v0.9.0/restic_0.9.0_linux_amd64.bz2 && \
bunzip2 restic_0.9.0_linux_amd64.bz2 && \
mv restic_0.9.0_linux_amd64 /restic && \
chmod +x /restic
ADD /complete-restore.sh /complete-restore.sh
USER nobody:nobody
ENTRYPOINT ["/ark"]

View File

@ -1,4 +1,4 @@
# Copyright 2017 the Heptio Ark contributors.
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -14,12 +14,10 @@
FROM alpine:3.7
MAINTAINER Andy Goldstein <andy@heptio.com>
MAINTAINER Steve Kriss <steve@heptio.com>
RUN apk add --no-cache ca-certificates
ADD /bin/linux/amd64/ark /ark
ADD /bin/linux/amd64/restic-init-container .
USER nobody:nobody
ENTRYPOINT ["/ark"]
ENTRYPOINT [ "/restic-init-container" ]

View File

@ -15,7 +15,7 @@
# limitations under the License.
# The binary to build (just the basename).
BIN := ark
BIN ?= ark
# This repo's root import path (under GOPATH).
PKG := github.com/heptio/ark
@ -44,7 +44,7 @@ GOARCH = $(word 2, $(platform_temp))
# TODO(ncdc): support multiple image architectures once gcr.io supports manifest lists
# Set default base image dynamically for each arch
ifeq ($(GOARCH),amd64)
DOCKERFILE ?= Dockerfile.alpine
DOCKERFILE ?= Dockerfile-$(BIN).alpine
endif
#ifeq ($(GOARCH),arm)
# DOCKERFILE ?= Dockerfile.arm #armel/busybox
@ -58,7 +58,9 @@ IMAGE := $(REGISTRY)/$(BIN)
# If you want to build all binaries, see the 'all-build' rule.
# If you want to build all containers, see the 'all-container' rule.
# If you want to build AND push all containers, see the 'all-push' rule.
all: build
all:
@$(MAKE) build
@$(MAKE) build BIN=restic-init-container
build-%:
@$(MAKE) --no-print-directory ARCH=$* build
@ -94,10 +96,13 @@ BUILDER_IMAGE := ark-builder
# Example: make shell CMD="date > datefile"
shell: build-dirs build-image
@# the volume bind-mount of $PWD/vendor/k8s.io/api is needed for code-gen to
@# function correctly (ref. https://github.com/kubernetes/kubernetes/pull/64567)
@docker run \
-i $(TTY) \
--rm \
-u $$(id -u):$$(id -g) \
-v "$$(pwd)/vendor/k8s.io/api:/go/src/k8s.io/api:delegated" \
-v "$$(pwd)/.go/pkg:/go/pkg:delegated" \
-v "$$(pwd)/.go/std:/go/std:delegated" \
-v "$$(pwd):/go/src/$(PKG):delegated" \
@ -110,15 +115,25 @@ shell: build-dirs build-image
DOTFILE_IMAGE = $(subst :,_,$(subst /,_,$(IMAGE))-$(VERSION))
all-containers:
$(MAKE) container
$(MAKE) container BIN=restic-init-container
container: verify test .container-$(DOTFILE_IMAGE) container-name
.container-$(DOTFILE_IMAGE): _output/bin/$(GOOS)/$(GOARCH)/$(BIN) $(DOCKERFILE)
@cp $(DOCKERFILE) _output/.dockerfile-$(GOOS)-$(GOARCH)
@docker build -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(GOOS)-$(GOARCH) _output
@# TODO this is ugly
@cp restic/complete-restore.sh _output/
@cp $(DOCKERFILE) _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH)
@docker build -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH) _output
@docker images -q $(IMAGE):$(VERSION) > $@
container-name:
@echo "container: $(IMAGE):$(VERSION)"
all-push:
$(MAKE) push
$(MAKE) push BIN=restic-init-container
push: .push-$(DOTFILE_IMAGE) push-name
.push-$(DOTFILE_IMAGE): .container-$(DOTFILE_IMAGE)
@docker push $(IMAGE):$(VERSION)
@ -182,4 +197,4 @@ clean:
rm -rf .go _output
docker rmi $(BUILDER_IMAGE)
ci: build verify test
ci: all verify test

View File

@ -0,0 +1,77 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
)
// main waits until all restic restores for a single Ark restore are
// complete. It requires exactly one argument: the restore's UID, which
// done() uses to locate per-volume completion marker files under
// /restores. It polls once per second and exits 0 when all restores
// are finished.
func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "ERROR: exactly one argument must be provided, the restore's UID")
		os.Exit(1)
	}

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	// A bare `select` with a single case is unidiomatic (staticcheck S1000);
	// ranging over the ticker channel expresses the same poll loop directly.
	for range ticker.C {
		if done() {
			fmt.Println("All restic restores are done")
			return
		}
	}
}
// done returns true if, for each directory under /restores, a file exists
// within the .ark/ subdirectory whose name is equal to os.Args[1] (the
// restore's UID), or false otherwise. Any error reading /restores or
// stat-ing a marker file is logged and treated as "not done yet" so the
// caller keeps polling.
func done() bool {
	children, err := ioutil.ReadDir("/restores")
	if err != nil {
		fmt.Fprintf(os.Stderr, "ERROR reading /restores directory: %s\n", err)
		return false
	}

	for _, child := range children {
		// Non-directories can't be restored volumes; skip rather than fail.
		if !child.IsDir() {
			fmt.Printf("%s is not a directory, skipping.\n", child.Name())
			continue
		}

		doneFile := filepath.Join("/restores", child.Name(), ".ark", os.Args[1])

		if _, err := os.Stat(doneFile); os.IsNotExist(err) {
			fmt.Printf("Not found: %s\n", doneFile)
			return false
		} else if err != nil {
			fmt.Fprintf(os.Stderr, "ERROR looking for %s: %s\n", doneFile, err)
			return false
		}

		// BUG FIX: the original omitted the trailing newline, causing the
		// next log line to run onto the same line of output.
		fmt.Printf("Found %s\n", doneFile)
	}

	return true
}

View File

@ -34,10 +34,12 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.
* [ark client](ark_client.md) - Ark client related commands
* [ark completion](ark_completion.md) - Output shell completion code for the specified shell (bash or zsh)
* [ark create](ark_create.md) - Create ark resources
* [ark daemonset](ark_daemonset.md) - Run the ark daemonset
* [ark delete](ark_delete.md) - Delete ark resources
* [ark describe](ark_describe.md) - Describe ark resources
* [ark get](ark_get.md) - Get ark resources
* [ark plugin](ark_plugin.md) - Work with plugins
* [ark restic](ark_restic.md) - Work with restic repositories
* [ark restore](ark_restore.md) - Work with restores
* [ark schedule](ark_schedule.md) - Work with schedules
* [ark server](ark_server.md) - Run the ark server

View File

@ -0,0 +1,38 @@
## ark daemonset
Run the ark daemonset
### Synopsis
Run the ark daemonset
```
ark daemonset [flags]
```
### Options
```
-h, --help help for daemonset
--log-level the level at which to log. Valid values are debug, info, warning, error, fatal, panic. (default info)
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--kubecontext string The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
-n, --namespace string The namespace in which Ark should operate (default "heptio-ark")
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.

View File

@ -0,0 +1,34 @@
## ark restic
Work with restic repositories
### Synopsis
Work with restic repositories
### Options
```
-h, --help help for restic
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--kubecontext string The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
-n, --namespace string The namespace in which Ark should operate (default "heptio-ark")
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
* [ark restic init-repository](ark_restic_init-repository.md) - create an encryption key for a restic repository

View File

@ -0,0 +1,40 @@
## ark restic init-repository
create an encryption key for a restic repository
### Synopsis
create an encryption key for a restic repository
```
ark restic init-repository [flags]
```
### Options
```
-h, --help help for init-repository
--key-data string Encryption key for the restic repository. Optional; if unset, Ark will generate a random key for you.
--key-file string Path to file containing the encryption key for the restic repository. Optional; if unset, Ark will generate a random key for you.
--key-size int Size of the generated key for the restic repository (default 1024)
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--kubecontext string The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
-n, --namespace string The namespace in which Ark should operate (default "heptio-ark")
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark restic](ark_restic.md) - Work with restic repositories

View File

@ -0,0 +1,61 @@
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: restic
namespace: heptio-ark
spec:
selector:
matchLabels:
name: restic
template:
metadata:
labels:
name: restic
spec:
serviceAccountName: ark
securityContext:
runAsUser: 0
volumes:
- name: cloud-credentials
secret:
secretName: cloud-credentials
- name: host-pods
hostPath:
path: /var/lib/kubelet/pods
containers:
- name: ark
image: gcr.io/heptio-images/ark:latest
command:
- /ark
args:
- daemonset
volumeMounts:
- name: cloud-credentials
mountPath: /credentials
- name: host-pods
mountPath: /host_pods
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: HEPTIO_ARK_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: AWS_SHARED_CREDENTIALS_FILE
value: /credentials/cloud

View File

@ -0,0 +1,68 @@
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: restic
namespace: heptio-ark
spec:
selector:
matchLabels:
name: restic
template:
metadata:
labels:
name: restic
spec:
serviceAccountName: ark
securityContext:
runAsUser: 0
volumes:
- name: host-pods
hostPath:
path: /var/lib/kubelet/pods
containers:
- name: ark
image: gcr.io/heptio-images/ark:latest
command:
- /ark
args:
- daemonset
volumeMounts:
- name: host-pods
mountPath: /host_pods
envFrom:
- secretRef:
name: cloud-credentials
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: HEPTIO_ARK_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: AZURE_ACCOUNT_NAME
valueFrom:
secretKeyRef:
name: cloud-credentials
key: AZURE_STORAGE_ACCOUNT_ID
- name: AZURE_ACCOUNT_KEY
valueFrom:
secretKeyRef:
name: cloud-credentials
key: AZURE_STORAGE_KEY

View File

@ -102,6 +102,36 @@ spec:
plural: deletebackuprequests
kind: DeleteBackupRequest
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: podvolumebackups.ark.heptio.com
labels:
component: ark
spec:
group: ark.heptio.com
version: v1
scope: Namespaced
names:
plural: podvolumebackups
kind: PodVolumeBackup
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: podvolumerestores.ark.heptio.com
labels:
component: ark
spec:
group: ark.heptio.com
version: v1
scope: Namespaced
names:
plural: podvolumerestores
kind: PodVolumeRestore
---
apiVersion: v1
kind: Namespace

View File

@ -0,0 +1,61 @@
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: restic
namespace: heptio-ark
spec:
selector:
matchLabels:
name: restic
template:
metadata:
labels:
name: restic
spec:
serviceAccountName: ark
securityContext:
runAsUser: 0
volumes:
- name: cloud-credentials
secret:
secretName: cloud-credentials
- name: host-pods
hostPath:
path: /var/lib/kubelet/pods
containers:
- name: ark
image: gcr.io/heptio-images/ark:latest
command:
- /ark
args:
- daemonset
volumeMounts:
- name: cloud-credentials
mountPath: /credentials
- name: host-pods
mountPath: /host_pods
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: HEPTIO_ARK_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: GOOGLE_APPLICATION_CREDENTIALS
value: /credentials/cloud

View File

@ -0,0 +1,61 @@
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: restic
namespace: heptio-ark
spec:
selector:
matchLabels:
name: restic
template:
metadata:
labels:
name: restic
spec:
serviceAccountName: ark
securityContext:
runAsUser: 0
volumes:
- name: cloud-credentials
secret:
secretName: cloud-credentials
- name: host-pods
hostPath:
path: /var/lib/kubelet/pods
containers:
- name: ark
image: gcr.io/heptio-images/ark:latest
command:
- /ark
args:
- daemonset
volumeMounts:
- name: cloud-credentials
mountPath: /credentials
- name: host-pods
mountPath: /host_pods
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: HEPTIO_ARK_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: AWS_SHARED_CREDENTIALS_FILE
value: /credentials/cloud

View File

@ -58,6 +58,10 @@ type Config struct {
// new backups that should be triggered based on schedules.
ScheduleSyncPeriod metav1.Duration `json:"scheduleSyncPeriod"`
// PodVolumeOperationTimeout is how long backups/restores of pod volumes (i.e.
// using restic) should be allowed to run before timing out.
PodVolumeOperationTimeout metav1.Duration `json:"podVolumeOperationTimeout"`
// ResourcePriorities is an ordered slice of resources specifying the desired
// order of resource restores. Any resources not in the list will be restored
// alphabetically after the prioritized resources.
@ -86,4 +90,10 @@ type ObjectStorageProviderConfig struct {
// Bucket is the name of the bucket in object storage where Ark backups
// are stored.
Bucket string `json:"bucket"`
// ResticLocation is the bucket and optional prefix in object storage where
// Ark stores restic backups of pod volumes, specified either as "bucket" or
// "bucket/prefix". This bucket must be different than the `Bucket` field.
// Optional.
ResticLocation string `json:"resticLocation"`
}

View File

@ -33,11 +33,6 @@ const (
DeleteBackupRequestPhaseInProgress DeleteBackupRequestPhase = "InProgress"
// DeleteBackupRequestPhaseProcessed means the DeleteBackupRequest has been processed.
DeleteBackupRequestPhaseProcessed DeleteBackupRequestPhase = "Processed"
// BackupNameLabel is the label key used by a DeleteBackupRequest to identify its backup by name.
BackupNameLabel = "ark.heptio.com/backup-name"
// BackupUIDLabel is the label key used by a DeleteBackupRequest to identify its backup by uid.
BackupUIDLabel = "ark.heptio.com/backup-uid"
)
// DeleteBackupRequestStatus is the current status of a DeleteBackupRequest.

View File

@ -0,0 +1,39 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
const (
// BackupNameLabel is the label key used to identify a backup by name.
BackupNameLabel = "ark.heptio.com/backup-name"
// BackupUIDLabel is the label key used to identify a backup by uid.
BackupUIDLabel = "ark.heptio.com/backup-uid"
// RestoreNameLabel is the label key used to identify a restore by name.
RestoreNameLabel = "ark.heptio.com/restore-name"
// RestoreUIDLabel is the label key used to identify a restore by uid.
RestoreUIDLabel = "ark.heptio.com/restore-uid"
// PodUIDLabel is the label key used to identify a pod by uid.
PodUIDLabel = "ark.heptio.com/pod-uid"
// PodVolumeOperationTimeoutAnnotation is the annotation key used to apply
// a backup/restore-specific timeout value for pod volume operations (i.e.
// restic backups/restores).
PodVolumeOperationTimeoutAnnotation = "ark.heptio.com/pod-volume-timeout"
)

View File

@ -0,0 +1,88 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PodVolumeBackupSpec is the specification for a PodVolumeBackup.
type PodVolumeBackupSpec struct {
// Node is the name of the node that the Pod is running on.
Node string `json:"node"`
// Pod is a reference to the pod containing the volume to be backed up.
Pod corev1api.ObjectReference `json:"pod"`
// Volume is the name of the volume within the Pod to be backed
// up.
Volume string `json:"volume"`
// RepoPrefix is the restic repository prefix (i.e. not containing
// the repository name itself).
RepoPrefix string `json:"repoPrefix"`
// Tags are a map of key-value pairs that should be applied to the
// volume backup as tags.
Tags map[string]string `json:"tags"`
}
// PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup.
type PodVolumeBackupPhase string
const (
PodVolumeBackupPhaseNew PodVolumeBackupPhase = "New"
PodVolumeBackupPhaseInProgress PodVolumeBackupPhase = "InProgress"
PodVolumeBackupPhaseCompleted PodVolumeBackupPhase = "Completed"
PodVolumeBackupPhaseFailed PodVolumeBackupPhase = "Failed"
)
// PodVolumeBackupStatus is the current status of a PodVolumeBackup.
type PodVolumeBackupStatus struct {
// Phase is the current state of the PodVolumeBackup.
Phase PodVolumeBackupPhase `json:"phase"`
// Path is the full path within the controller pod being backed up.
Path string `json:"path"`
// SnapshotID is the identifier for the snapshot of the pod volume.
SnapshotID string `json:"snapshotID"`
// Message is a message about the pod volume backup's status.
Message string `json:"message"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PodVolumeBackup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec PodVolumeBackupSpec `json:"spec"`
Status PodVolumeBackupStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodVolumeBackupList is a list of PodVolumeBackups.
type PodVolumeBackupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []PodVolumeBackup `json:"items"`
}

View File

@ -0,0 +1,77 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PodVolumeRestoreSpec is the specification for a PodVolumeRestore.
type PodVolumeRestoreSpec struct {
// Pod is a reference to the pod containing the volume to be restored.
Pod corev1api.ObjectReference `json:"pod"`
// Volume is the name of the volume within the Pod to be restored.
Volume string `json:"volume"`
// RepoPrefix is the restic repository prefix (i.e. not containing
// the repository name itself).
RepoPrefix string `json:"repoPrefix"`
// SnapshotID is the ID of the volume snapshot to be restored.
SnapshotID string `json:"snapshotID"`
}
// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.
type PodVolumeRestorePhase string
const (
PodVolumeRestorePhaseNew PodVolumeRestorePhase = "New"
PodVolumeRestorePhaseInProgress PodVolumeRestorePhase = "InProgress"
PodVolumeRestorePhaseCompleted PodVolumeRestorePhase = "Completed"
PodVolumeRestorePhaseFailed PodVolumeRestorePhase = "Failed"
)
// PodVolumeRestoreStatus is the current status of a PodVolumeRestore.
type PodVolumeRestoreStatus struct {
// Phase is the current state of the PodVolumeRestore.
Phase PodVolumeRestorePhase `json:"phase"`
// Message is a message about the pod volume restore's status.
Message string `json:"message"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PodVolumeRestore struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec PodVolumeRestoreSpec `json:"spec"`
Status PodVolumeRestoreStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodVolumeRestoreList is a list of PodVolumeRestores.
type PodVolumeRestoreList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []PodVolumeRestore `json:"items"`
}

View File

@ -55,6 +55,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&DownloadRequestList{},
&DeleteBackupRequest{},
&DeleteBackupRequestList{},
&PodVolumeBackup{},
&PodVolumeBackupList{},
&PodVolumeRestore{},
&PodVolumeRestoreList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil

View File

@ -340,6 +340,7 @@ func (in *Config) DeepCopyInto(out *Config) {
out.BackupSyncPeriod = in.BackupSyncPeriod
out.GCSyncPeriod = in.GCSyncPeriod
out.ScheduleSyncPeriod = in.ScheduleSyncPeriod
out.PodVolumeOperationTimeout = in.PodVolumeOperationTimeout
if in.ResourcePriorities != nil {
in, out := &in.ResourcePriorities, &out.ResourcePriorities
*out = make([]string, len(*in))
@ -647,6 +648,201 @@ func (in *ObjectStorageProviderConfig) DeepCopy() *ObjectStorageProviderConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeBackup) DeepCopyInto(out *PodVolumeBackup) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackup.
func (in *PodVolumeBackup) DeepCopy() *PodVolumeBackup {
if in == nil {
return nil
}
out := new(PodVolumeBackup)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodVolumeBackup) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeBackupList) DeepCopyInto(out *PodVolumeBackupList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodVolumeBackup, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupList.
func (in *PodVolumeBackupList) DeepCopy() *PodVolumeBackupList {
if in == nil {
return nil
}
out := new(PodVolumeBackupList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodVolumeBackupList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeBackupSpec) DeepCopyInto(out *PodVolumeBackupSpec) {
*out = *in
out.Pod = in.Pod
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupSpec.
func (in *PodVolumeBackupSpec) DeepCopy() *PodVolumeBackupSpec {
if in == nil {
return nil
}
out := new(PodVolumeBackupSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupStatus.
func (in *PodVolumeBackupStatus) DeepCopy() *PodVolumeBackupStatus {
if in == nil {
return nil
}
out := new(PodVolumeBackupStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeRestore) DeepCopyInto(out *PodVolumeRestore) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestore.
func (in *PodVolumeRestore) DeepCopy() *PodVolumeRestore {
if in == nil {
return nil
}
out := new(PodVolumeRestore)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodVolumeRestore) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeRestoreList) DeepCopyInto(out *PodVolumeRestoreList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodVolumeRestore, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreList.
func (in *PodVolumeRestoreList) DeepCopy() *PodVolumeRestoreList {
if in == nil {
return nil
}
out := new(PodVolumeRestoreList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodVolumeRestoreList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeRestoreSpec) DeepCopyInto(out *PodVolumeRestoreSpec) {
*out = *in
out.Pod = in.Pod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreSpec.
func (in *PodVolumeRestoreSpec) DeepCopy() *PodVolumeRestoreSpec {
if in == nil {
return nil
}
out := new(PodVolumeRestoreSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeRestoreStatus) DeepCopyInto(out *PodVolumeRestoreStatus) {
	// the status contains only value-copyable fields, so a shallow copy suffices
	*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreStatus.
func (in *PodVolumeRestoreStatus) DeepCopy() *PodVolumeRestoreStatus {
	if in == nil {
		return nil
	}
	copied := new(PodVolumeRestoreStatus)
	in.DeepCopyInto(copied)
	return copied
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Restore) DeepCopyInto(out *Restore) {
*out = *in

View File

@ -19,8 +19,10 @@ package backup
import (
"archive/tar"
"compress/gzip"
"context"
"fmt"
"io"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -34,6 +36,8 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
kubeutil "github.com/heptio/ark/pkg/util/kube"
"github.com/heptio/ark/pkg/util/logging"
@ -48,11 +52,13 @@ type Backupper interface {
// kubernetesBackupper implements Backupper.
type kubernetesBackupper struct {
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
podCommandExecutor podCommandExecutor
groupBackupperFactory groupBackupperFactory
snapshotService cloudprovider.SnapshotService
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
podCommandExecutor podexec.PodCommandExecutor
groupBackupperFactory groupBackupperFactory
snapshotService cloudprovider.SnapshotService
resticBackupperFactory restic.BackupperFactory
resticTimeout time.Duration
}
type itemKey struct {
@ -87,15 +93,19 @@ func cohabitatingResources() map[string]*cohabitatingResource {
func NewKubernetesBackupper(
discoveryHelper discovery.Helper,
dynamicFactory client.DynamicFactory,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
snapshotService cloudprovider.SnapshotService,
resticBackupperFactory restic.BackupperFactory,
resticTimeout time.Duration,
) (Backupper, error) {
return &kubernetesBackupper{
discoveryHelper: discoveryHelper,
dynamicFactory: dynamicFactory,
podCommandExecutor: podCommandExecutor,
groupBackupperFactory: &defaultGroupBackupperFactory{},
snapshotService: snapshotService,
discoveryHelper: discoveryHelper,
dynamicFactory: dynamicFactory,
podCommandExecutor: podCommandExecutor,
groupBackupperFactory: &defaultGroupBackupperFactory{},
snapshotService: snapshotService,
resticBackupperFactory: resticBackupperFactory,
resticTimeout: resticTimeout,
}, nil
}
@ -232,11 +242,6 @@ func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io
return err
}
var labelSelector string
if backup.Spec.LabelSelector != nil {
labelSelector = metav1.FormatLabelSelector(backup.Spec.LabelSelector)
}
backedUpItems := make(map[itemKey]struct{})
var errs []error
@ -245,12 +250,32 @@ func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io
return err
}
podVolumeTimeout := kb.resticTimeout
if val := backup.Annotations[api.PodVolumeOperationTimeoutAnnotation]; val != "" {
parsed, err := time.ParseDuration(val)
if err != nil {
log.WithError(errors.WithStack(err)).Errorf("Unable to parse pod volume timeout annotation %s, using server value.", val)
} else {
podVolumeTimeout = parsed
}
}
ctx, cancelFunc := context.WithTimeout(context.Background(), podVolumeTimeout)
defer cancelFunc()
var resticBackupper restic.Backupper
if kb.resticBackupperFactory != nil {
resticBackupper, err = kb.resticBackupperFactory.NewBackupper(ctx, backup)
if err != nil {
return errors.WithStack(err)
}
}
gb := kb.groupBackupperFactory.newGroupBackupper(
log,
backup,
namespaceIncludesExcludes,
resourceIncludesExcludes,
labelSelector,
kb.dynamicFactory,
kb.discoveryHelper,
backedUpItems,
@ -260,6 +285,7 @@ func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io
tw,
resourceHooks,
kb.snapshotService,
resticBackupper,
)
for _, group := range kb.discoveryHelper.Resources() {

View File

@ -19,7 +19,6 @@ package backup
import (
"bytes"
"compress/gzip"
"encoding/json"
"io"
"reflect"
"sort"
@ -43,6 +42,8 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
kubeutil "github.com/heptio/ark/pkg/util/kube"
arktest "github.com/heptio/ark/pkg/util/test"
@ -505,7 +506,7 @@ func TestBackup(t *testing.T) {
dynamicFactory := &arktest.FakeDynamicFactory{}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
b, err := NewKubernetesBackupper(
@ -513,6 +514,8 @@ func TestBackup(t *testing.T) {
dynamicFactory,
podCommandExecutor,
nil,
nil, // restic backupper factory
0, // restic timeout
)
require.NoError(t, err)
kb := b.(*kubernetesBackupper)
@ -529,7 +532,6 @@ func TestBackup(t *testing.T) {
test.backup,
test.expectedNamespaces,
test.expectedResources,
test.expectedLabelSelector,
dynamicFactory,
discoveryHelper,
map[itemKey]struct{}{}, // backedUpItems
@ -539,6 +541,7 @@ func TestBackup(t *testing.T) {
mock.Anything, // tarWriter
test.expectedHooks,
mock.Anything,
mock.Anything, // restic backupper
).Return(groupBackupper)
for group, err := range test.backupGroupErrors {
@ -578,7 +581,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
},
}
b, err := NewKubernetesBackupper(discoveryHelper, nil, nil, nil)
b, err := NewKubernetesBackupper(discoveryHelper, nil, nil, nil, nil, 0)
require.NoError(t, err)
kb := b.(*kubernetesBackupper)
@ -594,7 +597,6 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
discoveryHelper,
mock.Anything,
firstCohabitatingResources,
@ -603,6 +605,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
).Return(&mockGroupBackupper{})
assert.NoError(t, b.Backup(&v1.Backup{}, &bytes.Buffer{}, &bytes.Buffer{}, nil))
@ -625,7 +628,6 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
discoveryHelper,
mock.Anything,
secondCohabitatingResources,
@ -634,6 +636,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
).Return(&mockGroupBackupper{})
assert.NoError(t, b.Backup(&v1.Backup{}, &bytes.Buffer{}, &bytes.Buffer{}, nil))
@ -652,23 +655,22 @@ func (f *mockGroupBackupperFactory) newGroupBackupper(
log logrus.FieldLogger,
backup *v1.Backup,
namespaces, resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
) groupBackupper {
args := f.Called(
log,
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@ -678,6 +680,7 @@ func (f *mockGroupBackupperFactory) newGroupBackupper(
tarWriter,
resourceHooks,
snapshotService,
resticBackupper,
)
return args.Get(0).(groupBackupper)
}
@ -691,26 +694,12 @@ func (gb *mockGroupBackupper) backupGroup(group *metav1.APIResourceList) error {
return args.Error(0)
}
func getAsMap(j string) (map[string]interface{}, error) {
m := make(map[string]interface{})
err := json.Unmarshal([]byte(j), &m)
return m, err
}
func toRuntimeObject(t *testing.T, data string) runtime.Object {
o, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(data), nil, nil)
require.NoError(t, err)
return o
}
func unstructuredOrDie(data string) *unstructured.Unstructured {
o, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(data), nil, nil)
if err != nil {
panic(err)
}
return o.(*unstructured.Unstructured)
}
func TestGetResourceHook(t *testing.T) {
tests := []struct {
name string

View File

@ -31,6 +31,8 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
)
@ -39,16 +41,16 @@ type groupBackupperFactory interface {
log logrus.FieldLogger,
backup *v1.Backup,
namespaces, resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
) groupBackupper
}
@ -58,23 +60,22 @@ func (f *defaultGroupBackupperFactory) newGroupBackupper(
log logrus.FieldLogger,
backup *v1.Backup,
namespaces, resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
) groupBackupper {
return &defaultGroupBackupper{
log: log,
backup: backup,
namespaces: namespaces,
resources: resources,
labelSelector: labelSelector,
dynamicFactory: dynamicFactory,
discoveryHelper: discoveryHelper,
backedUpItems: backedUpItems,
@ -84,6 +85,7 @@ func (f *defaultGroupBackupperFactory) newGroupBackupper(
tarWriter: tarWriter,
resourceHooks: resourceHooks,
snapshotService: snapshotService,
resticBackupper: resticBackupper,
resourceBackupperFactory: &defaultResourceBackupperFactory{},
}
}
@ -96,16 +98,16 @@ type defaultGroupBackupper struct {
log logrus.FieldLogger
backup *v1.Backup
namespaces, resources *collections.IncludesExcludes
labelSelector string
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
backedUpItems map[itemKey]struct{}
cohabitatingResources map[string]*cohabitatingResource
actions []resolvedAction
podCommandExecutor podCommandExecutor
podCommandExecutor podexec.PodCommandExecutor
tarWriter tarWriter
resourceHooks []resourceHook
snapshotService cloudprovider.SnapshotService
resticBackupper restic.Backupper
resourceBackupperFactory resourceBackupperFactory
}
@ -119,7 +121,6 @@ func (gb *defaultGroupBackupper) backupGroup(group *metav1.APIResourceList) erro
gb.backup,
gb.namespaces,
gb.resources,
gb.labelSelector,
gb.dynamicFactory,
gb.discoveryHelper,
gb.backedUpItems,
@ -129,6 +130,7 @@ func (gb *defaultGroupBackupper) backupGroup(group *metav1.APIResourceList) erro
gb.tarWriter,
gb.resourceHooks,
gb.snapshotService,
gb.resticBackupper,
)
)

View File

@ -23,6 +23,8 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/sirupsen/logrus"
@ -38,7 +40,6 @@ func TestBackupGroup(t *testing.T) {
namespaces := collections.NewIncludesExcludes().Includes("a")
resources := collections.NewIncludesExcludes().Includes("b")
labelSelector := "foo=bar"
dynamicFactory := &arktest.FakeDynamicFactory{}
defer dynamicFactory.AssertExpectations(t)
@ -64,7 +65,7 @@ func TestBackupGroup(t *testing.T) {
},
}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
tarWriter := &fakeTarWriter{}
@ -78,7 +79,6 @@ func TestBackupGroup(t *testing.T) {
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@ -87,7 +87,8 @@ func TestBackupGroup(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
nil, // snapshot service
nil, // restic backupper
).(*defaultGroupBackupper)
resourceBackupperFactory := &mockResourceBackupperFactory{}
@ -102,7 +103,6 @@ func TestBackupGroup(t *testing.T) {
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@ -112,6 +112,7 @@ func TestBackupGroup(t *testing.T) {
tarWriter,
resourceHooks,
nil,
mock.Anything, // restic backupper
).Return(resourceBackupper)
group := &metav1.APIResourceList{
@ -150,23 +151,22 @@ func (rbf *mockResourceBackupperFactory) newResourceBackupper(
backup *v1.Backup,
namespaces *collections.IncludesExcludes,
resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
) resourceBackupper {
args := rbf.Called(
log,
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,

View File

@ -25,18 +25,22 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/heptio/ark/pkg/kuberesource"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kuberrs "k8s.io/apimachinery/pkg/util/errors"
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
"github.com/heptio/ark/pkg/util/logging"
)
@ -47,12 +51,13 @@ type itemBackupperFactory interface {
namespaces, resources *collections.IncludesExcludes,
backedUpItems map[itemKey]struct{},
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
) ItemBackupper
}
@ -63,12 +68,13 @@ func (f *defaultItemBackupperFactory) newItemBackupper(
namespaces, resources *collections.IncludesExcludes,
backedUpItems map[itemKey]struct{},
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
) ItemBackupper {
ib := &defaultItemBackupper{
backup: backup,
@ -84,6 +90,7 @@ func (f *defaultItemBackupperFactory) newItemBackupper(
itemHookHandler: &defaultItemHookHandler{
podCommandExecutor: podCommandExecutor,
},
resticBackupper: resticBackupper,
}
// this is for testing purposes
@ -107,6 +114,7 @@ type defaultItemBackupper struct {
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
snapshotService cloudprovider.SnapshotService
resticBackupper restic.Backupper
itemHookHandler itemHookHandler
additionalItemBackupper ItemBackupper
@ -183,13 +191,26 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
}
}
if groupResource == kuberesource.Pods && len(restic.GetVolumesToBackup(metadata)) > 0 {
var (
updatedObj runtime.Unstructured
errs []error
)
if updatedObj, errs = backupPodVolumes(log, ib.backup, obj, ib.resticBackupper); len(errs) > 0 {
backupErrs = append(backupErrs, errs...)
} else {
obj = updatedObj
}
}
log.Debug("Executing post hooks")
if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.resourceHooks, hookPhasePost); err != nil {
backupErrs = append(backupErrs, err)
}
if len(backupErrs) != 0 {
return kuberrs.NewAggregate(backupErrs)
return kubeerrs.NewAggregate(backupErrs)
}
var filePath string
@ -223,6 +244,39 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
return nil
}
// backupPodVolumes runs restic backups for the given pod's volumes via the
// supplied backupper and returns the pod (as unstructured) annotated with the
// snapshot IDs of the successful volume snapshots.
//
// Behavior:
//   - backupper == nil: logs a warning and returns obj unchanged.
//   - any snapshot error: returns (nil, errs); the caller gets no object back.
//   - no snapshots taken: returns obj unchanged.
//
// NOTE(review): presumably the set of volumes to back up is driven by the
// pod's volumes-to-backup annotation (see restic.GetVolumesToBackup at the
// call site) — confirm against restic.Backupper's contract.
func backupPodVolumes(log logrus.FieldLogger, backup *api.Backup, obj runtime.Unstructured, backupper restic.Backupper) (runtime.Unstructured, []error) {
	if backupper == nil {
		log.Warn("No restic backupper, not backing up pod's volumes")
		return obj, nil
	}

	// convert the unstructured object to a typed Pod so the restic backupper
	// and annotation helpers can operate on it
	pod := new(corev1api.Pod)
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pod); err != nil {
		return nil, []error{errors.WithStack(err)}
	}

	volumeSnapshots, errs := backupper.BackupPodVolumes(backup, pod, log)
	if len(errs) > 0 {
		return nil, errs
	}

	if len(volumeSnapshots) == 0 {
		return obj, nil
	}

	// annotate the pod with the successful volume snapshots
	for volume, snapshot := range volumeSnapshots {
		restic.SetPodSnapshotAnnotation(pod, volume, snapshot)
	}

	// convert annotated pod back to unstructured to return
	unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pod)
	if err != nil {
		return nil, []error{errors.WithStack(err)}
	}

	return &unstructured.Unstructured{Object: unstructuredObj}, nil
}
func (ib *defaultItemBackupper) executeActions(log logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource, name, namespace string, metadata metav1.Object) error {
for _, action := range ib.actions {
if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {

View File

@ -99,7 +99,7 @@ func TestBackupItemSkips(t *testing.T) {
backedUpItems: test.backedUpItems,
}
u := unstructuredOrDie(fmt.Sprintf(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"%s","name":"%s"}}`, test.namespace, test.name))
u := arktest.UnstructuredOrDie(fmt.Sprintf(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"%s","name":"%s"}}`, test.namespace, test.name))
err := ib.backupItem(arktest.NewLogger(), u, test.groupResource)
assert.NoError(t, err)
})
@ -118,7 +118,7 @@ func TestBackupItemSkipsClusterScopedResourceWhenIncludeClusterResourcesFalse(t
resources: collections.NewIncludesExcludes(),
}
u := unstructuredOrDie(`{"apiVersion":"v1","kind":"Foo","metadata":{"name":"bar"}}`)
u := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Foo","metadata":{"name":"bar"}}`)
err := ib.backupItem(arktest.NewLogger(), u, schema.GroupResource{Group: "foo", Resource: "bar"})
assert.NoError(t, err)
}
@ -219,8 +219,8 @@ func TestBackupItemNoSkips(t *testing.T) {
},
},
customActionAdditionalItems: []runtime.Unstructured{
unstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`),
unstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`),
},
},
{
@ -245,8 +245,8 @@ func TestBackupItemNoSkips(t *testing.T) {
},
},
customActionAdditionalItems: []runtime.Unstructured{
unstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`),
unstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`),
},
additionalItemError: errors.New("foo"),
},
@ -300,7 +300,7 @@ func TestBackupItemNoSkips(t *testing.T) {
groupResource = schema.ParseGroupResource(test.groupResource)
}
item, err := getAsMap(test.item)
item, err := arktest.GetAsMap(test.item)
if err != nil {
t.Fatal(err)
}
@ -333,7 +333,7 @@ func TestBackupItemNoSkips(t *testing.T) {
resourceHooks := []resourceHook{}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
dynamicFactory := &arktest.FakeDynamicFactory{}
@ -352,7 +352,8 @@ func TestBackupItemNoSkips(t *testing.T) {
resourceHooks,
dynamicFactory,
discoveryHelper,
nil,
nil, // snapshot service
nil, // restic backupper
).(*defaultItemBackupper)
var snapshotService *arktest.FakeSnapshotService
@ -426,7 +427,7 @@ func TestBackupItemNoSkips(t *testing.T) {
assert.False(t, w.headers[0].ModTime.IsZero(), "header.modTime set")
assert.Equal(t, 1, len(w.data), "# of data")
actual, err := getAsMap(string(w.data[0]))
actual, err := arktest.GetAsMap(string(w.data[0]))
if err != nil {
t.Fatal(err)
}
@ -572,7 +573,7 @@ func TestTakePVSnapshot(t *testing.T) {
ib := &defaultItemBackupper{snapshotService: snapshotService}
pv, err := getAsMap(test.pv)
pv, err := arktest.GetAsMap(test.pv)
if err != nil {
t.Fatal(err)
}

View File

@ -21,16 +21,19 @@ import (
"fmt"
"time"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/util/collections"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/util/collections"
)
type hookPhase string
@ -57,7 +60,7 @@ type itemHookHandler interface {
// defaultItemHookHandler is the default itemHookHandler.
type defaultItemHookHandler struct {
podCommandExecutor podCommandExecutor
podCommandExecutor podexec.PodCommandExecutor
}
func (h *defaultItemHookHandler) handleHooks(
@ -94,7 +97,7 @@ func (h *defaultItemHookHandler) handleHooks(
"hookPhase": phase,
},
)
if err := h.podCommandExecutor.executePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, "<from-annotation>", hookFromAnnotations); err != nil {
if err := h.podCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, "<from-annotation>", hookFromAnnotations); err != nil {
hookLog.WithError(err).Error("Error executing hook")
if hookFromAnnotations.OnError == api.HookErrorModeFail {
return err
@ -127,7 +130,7 @@ func (h *defaultItemHookHandler) handleHooks(
"hookPhase": phase,
},
)
err := h.podCommandExecutor.executePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, resourceHook.name, hook.Exec)
err := h.podCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, resourceHook.name, hook.Exec)
if err != nil {
hookLog.WithError(err).Error("Error executing hook")
if hook.Exec.OnError == api.HookErrorModeFail {
@ -147,8 +150,6 @@ const (
podBackupHookCommandAnnotationKey = "hook.backup.ark.heptio.com/command"
podBackupHookOnErrorAnnotationKey = "hook.backup.ark.heptio.com/on-error"
podBackupHookTimeoutAnnotationKey = "hook.backup.ark.heptio.com/timeout"
defaultHookOnError = api.HookErrorModeFail
defaultHookTimeout = 30 * time.Second
)
func phasedKey(phase hookPhase, key string) string {

View File

@ -57,7 +57,7 @@ func TestHandleHooksSkips(t *testing.T) {
},
{
name: "pod without annotation / no spec hooks",
item: unstructuredOrDie(
item: arktest.UnstructuredOrDie(
`
{
"apiVersion": "v1",
@ -73,7 +73,7 @@ func TestHandleHooksSkips(t *testing.T) {
{
name: "spec hooks not applicable",
groupResource: "pods",
item: unstructuredOrDie(
item: arktest.UnstructuredOrDie(
`
{
"apiVersion": "v1",
@ -114,7 +114,7 @@ func TestHandleHooksSkips(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
h := &defaultItemHookHandler{
@ -144,7 +144,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, no annotation, spec (multiple pre hooks) = run spec",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@ -194,7 +194,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, no annotation, spec (multiple post hooks) = run spec",
phase: hookPhasePost,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@ -244,7 +244,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation (legacy), no spec = run annotation",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@ -266,7 +266,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation (pre), no spec = run annotation",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@ -288,7 +288,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation (post), no spec = run annotation",
phase: hookPhasePost,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@ -310,7 +310,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation & spec = run annotation",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@ -345,7 +345,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation, onError=fail = return error",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@ -371,7 +371,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation, onError=continue = return nil",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@ -397,7 +397,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, spec, onError=fail = don't run other hooks",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@ -459,7 +459,7 @@ func TestHandleHooks(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
h := &defaultItemHookHandler{
@ -467,20 +467,20 @@ func TestHandleHooks(t *testing.T) {
}
if test.expectedPodHook != nil {
podCommandExecutor.On("executePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", "<from-annotation>", test.expectedPodHook).Return(test.expectedPodHookError)
podCommandExecutor.On("ExecutePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", "<from-annotation>", test.expectedPodHook).Return(test.expectedPodHookError)
} else {
hookLoop:
for _, resourceHook := range test.hooks {
for _, hook := range resourceHook.pre {
hookError := test.hookErrorsByContainer[hook.Exec.Container]
podCommandExecutor.On("executePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", resourceHook.name, hook.Exec).Return(hookError)
podCommandExecutor.On("ExecutePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", resourceHook.name, hook.Exec).Return(hookError)
if hookError != nil && hook.Exec.OnError == v1.HookErrorModeFail {
break hookLoop
}
}
for _, hook := range resourceHook.post {
hookError := test.hookErrorsByContainer[hook.Exec.Container]
podCommandExecutor.On("executePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", resourceHook.name, hook.Exec).Return(hookError)
podCommandExecutor.On("ExecutePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", resourceHook.name, hook.Exec).Return(hookError)
if hookError != nil && hook.Exec.OnError == v1.HookErrorModeFail {
break hookLoop
}

View File

@ -46,7 +46,7 @@ func TestPodActionExecute(t *testing.T) {
}{
{
name: "no spec.volumes",
pod: unstructuredOrDie(`
pod: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@ -59,7 +59,7 @@ func TestPodActionExecute(t *testing.T) {
},
{
name: "persistentVolumeClaim without claimName",
pod: unstructuredOrDie(`
pod: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@ -79,7 +79,7 @@ func TestPodActionExecute(t *testing.T) {
},
{
name: "full test, mix of volume types",
pod: unstructuredOrDie(`
pod: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",

View File

@ -17,20 +17,24 @@ limitations under the License.
package backup
import (
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/util/collections"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kuberrs "k8s.io/apimachinery/pkg/util/errors"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
)
type resourceBackupperFactory interface {
@ -39,16 +43,16 @@ type resourceBackupperFactory interface {
backup *api.Backup,
namespaces *collections.IncludesExcludes,
resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
) resourceBackupper
}
@ -59,23 +63,22 @@ func (f *defaultResourceBackupperFactory) newResourceBackupper(
backup *api.Backup,
namespaces *collections.IncludesExcludes,
resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
) resourceBackupper {
return &defaultResourceBackupper{
log: log,
backup: backup,
namespaces: namespaces,
resources: resources,
labelSelector: labelSelector,
dynamicFactory: dynamicFactory,
discoveryHelper: discoveryHelper,
backedUpItems: backedUpItems,
@ -85,6 +88,7 @@ func (f *defaultResourceBackupperFactory) newResourceBackupper(
tarWriter: tarWriter,
resourceHooks: resourceHooks,
snapshotService: snapshotService,
resticBackupper: resticBackupper,
itemBackupperFactory: &defaultItemBackupperFactory{},
}
}
@ -98,16 +102,16 @@ type defaultResourceBackupper struct {
backup *api.Backup
namespaces *collections.IncludesExcludes
resources *collections.IncludesExcludes
labelSelector string
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
backedUpItems map[itemKey]struct{}
cohabitatingResources map[string]*cohabitatingResource
actions []resolvedAction
podCommandExecutor podCommandExecutor
podCommandExecutor podexec.PodCommandExecutor
tarWriter tarWriter
resourceHooks []resourceHook
snapshotService cloudprovider.SnapshotService
resticBackupper restic.Backupper
itemBackupperFactory itemBackupperFactory
}
@ -182,6 +186,7 @@ func (rb *defaultResourceBackupper) backupResource(
rb.dynamicFactory,
rb.discoveryHelper,
rb.snapshotService,
rb.resticBackupper,
)
namespacesToList := getNamespacesToList(rb.namespaces)
@ -235,8 +240,13 @@ func (rb *defaultResourceBackupper) backupResource(
return err
}
var labelSelector string
if selector := rb.backup.Spec.LabelSelector; selector != nil {
labelSelector = metav1.FormatLabelSelector(selector)
}
log.WithField("namespace", namespace).Info("Listing items")
unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: rb.labelSelector})
unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: labelSelector})
if err != nil {
return errors.WithStack(err)
}

View File

@ -24,6 +24,8 @@ import (
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/stretchr/testify/assert"
@ -74,8 +76,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "", Resource: "pods"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname2"}}`),
},
},
},
@ -90,12 +92,12 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "", Resource: "pods"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname2"}}`),
},
{
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname3"}}`),
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname4"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname3"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname4"}}`),
},
},
},
@ -110,8 +112,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
},
},
},
@ -127,8 +129,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
},
},
},
@ -166,8 +168,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
},
},
},
@ -194,8 +196,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
},
},
},
@ -211,8 +213,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "", Resource: "namespaces"},
expectSkip: false,
getResponses: []*unstructured.Unstructured{
unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`),
unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`),
},
},
}
@ -224,8 +226,6 @@ func TestBackupResource(t *testing.T) {
},
}
labelSelector := "foo=bar"
dynamicFactory := &arktest.FakeDynamicFactory{}
defer dynamicFactory.AssertExpectations(t)
@ -251,7 +251,7 @@ func TestBackupResource(t *testing.T) {
{name: "myhook"},
}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
tarWriter := &fakeTarWriter{}
@ -262,7 +262,6 @@ func TestBackupResource(t *testing.T) {
backup,
test.namespaces,
test.resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@ -271,7 +270,8 @@ func TestBackupResource(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
nil, // snapshot service
nil, // restic backupper
).(*defaultResourceBackupper)
itemBackupperFactory := &mockItemBackupperFactory{}
@ -294,6 +294,7 @@ func TestBackupResource(t *testing.T) {
dynamicFactory,
discoveryHelper,
mock.Anything,
mock.Anything,
).Return(itemBackupper)
if len(test.listResponses) > 0 {
@ -310,7 +311,7 @@ func TestBackupResource(t *testing.T) {
list.Items = append(list.Items, *item)
itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), item, test.groupResource).Return(nil)
}
client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)
client.On("List", metav1.ListOptions{}).Return(list, nil)
}
}
@ -379,13 +380,19 @@ func TestBackupResourceCohabitation(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
backup := &v1.Backup{}
backup := &v1.Backup{
Spec: v1.BackupSpec{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"foo": "bar",
},
},
},
}
namespaces := collections.NewIncludesExcludes().Includes("*")
resources := collections.NewIncludesExcludes().Includes("*")
labelSelector := "foo=bar"
dynamicFactory := &arktest.FakeDynamicFactory{}
defer dynamicFactory.AssertExpectations(t)
@ -411,7 +418,7 @@ func TestBackupResourceCohabitation(t *testing.T) {
{name: "myhook"},
}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
tarWriter := &fakeTarWriter{}
@ -421,7 +428,6 @@ func TestBackupResourceCohabitation(t *testing.T) {
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@ -430,7 +436,8 @@ func TestBackupResourceCohabitation(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
nil, // snapshot service
nil, // restic backupper
).(*defaultResourceBackupper)
itemBackupperFactory := &mockItemBackupperFactory{}
@ -451,7 +458,8 @@ func TestBackupResourceCohabitation(t *testing.T) {
resourceHooks,
dynamicFactory,
discoveryHelper,
mock.Anything,
mock.Anything, // snapshot service
mock.Anything, // restic backupper
).Return(itemBackupper)
client := &arktest.FakeDynamicClient{}
@ -459,7 +467,7 @@ func TestBackupResourceCohabitation(t *testing.T) {
// STEP 1: make sure the initial backup goes through
dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion1, test.apiResource, "").Return(client, nil)
client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(&unstructured.UnstructuredList{}, nil)
client.On("List", metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(backup.Spec.LabelSelector)}).Return(&unstructured.UnstructuredList{}, nil)
// STEP 2: do the backup
err := rb.backupResource(test.apiGroup1, test.apiResource)
@ -478,7 +486,6 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
namespaces := collections.NewIncludesExcludes().Includes("ns-1")
resources := collections.NewIncludesExcludes().Includes("*")
labelSelector := "foo=bar"
backedUpItems := map[itemKey]struct{}{}
dynamicFactory := &arktest.FakeDynamicFactory{}
@ -492,7 +499,7 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
resourceHooks := []resourceHook{}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
tarWriter := &fakeTarWriter{}
@ -502,7 +509,6 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@ -511,7 +517,8 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
nil, // snapshot service
nil, // restic backupper
).(*defaultResourceBackupper)
itemBackupperFactory := &mockItemBackupperFactory{}
@ -547,6 +554,7 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
dynamicFactory,
discoveryHelper,
mock.Anything,
mock.Anything,
).Return(itemBackupper)
client := &arktest.FakeDynamicClient{}
@ -554,7 +562,7 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
coreV1Group := schema.GroupVersion{Group: "", Version: "v1"}
dynamicFactory.On("ClientForGroupVersionResource", coreV1Group, namespacesResource, "").Return(client, nil)
ns1 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
ns1 := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
client.On("Get", "ns-1", metav1.GetOptions{}).Return(ns1, nil)
itemHookHandler.On("handleHooks", mock.Anything, schema.GroupResource{Group: "", Resource: "namespaces"}, ns1, resourceHooks, hookPhasePre).Return(nil)
@ -568,12 +576,19 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
}
func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
backup := &v1.Backup{}
backup := &v1.Backup{
Spec: v1.BackupSpec{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"foo": "bar",
},
},
},
}
namespaces := collections.NewIncludesExcludes().Excludes("ns-1")
resources := collections.NewIncludesExcludes().Includes("*")
labelSelector := "foo=bar"
backedUpItems := map[itemKey]struct{}{}
dynamicFactory := &arktest.FakeDynamicFactory{}
@ -587,7 +602,7 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
resourceHooks := []resourceHook{}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
tarWriter := &fakeTarWriter{}
@ -597,7 +612,6 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@ -606,7 +620,8 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
nil, // snapshot service
nil, // restic backupper
).(*defaultResourceBackupper)
itemBackupperFactory := &mockItemBackupperFactory{}
@ -631,6 +646,7 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
dynamicFactory,
discoveryHelper,
mock.Anything,
mock.Anything,
).Return(itemBackupper)
client := &arktest.FakeDynamicClient{}
@ -639,12 +655,12 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
coreV1Group := schema.GroupVersion{Group: "", Version: "v1"}
dynamicFactory.On("ClientForGroupVersionResource", coreV1Group, namespacesResource, "").Return(client, nil)
ns1 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
ns2 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`)
ns1 := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
ns2 := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`)
list := &unstructured.UnstructuredList{
Items: []unstructured.Unstructured{*ns1, *ns2},
}
client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)
client.On("List", metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(backup.Spec.LabelSelector)}).Return(list, nil)
itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), ns2, kuberesource.Namespaces).Return(nil)
@ -661,12 +677,13 @@ func (ibf *mockItemBackupperFactory) newItemBackupper(
namespaces, resources *collections.IncludesExcludes,
backedUpItems map[itemKey]struct{},
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
) ItemBackupper {
args := ibf.Called(
backup,
@ -680,6 +697,7 @@ func (ibf *mockItemBackupperFactory) newItemBackupper(
dynamicFactory,
discoveryHelper,
snapshotService,
resticBackupper,
)
return args.Get(0).(ItemBackupper)
}

View File

@ -66,7 +66,7 @@ func TestServiceAccountActionExecute(t *testing.T) {
}{
{
name: "no crbs",
serviceAccount: unstructuredOrDie(`
serviceAccount: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "ServiceAccount",
@ -81,7 +81,7 @@ func TestServiceAccountActionExecute(t *testing.T) {
},
{
name: "no matching crbs",
serviceAccount: unstructuredOrDie(`
serviceAccount: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "ServiceAccount",
@ -124,7 +124,7 @@ func TestServiceAccountActionExecute(t *testing.T) {
},
{
name: "some matching crbs",
serviceAccount: unstructuredOrDie(`
serviceAccount: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "ServiceAccount",

View File

@ -30,8 +30,10 @@ import (
"github.com/heptio/ark/pkg/cmd/cli/describe"
"github.com/heptio/ark/pkg/cmd/cli/get"
"github.com/heptio/ark/pkg/cmd/cli/plugin"
"github.com/heptio/ark/pkg/cmd/cli/restic"
"github.com/heptio/ark/pkg/cmd/cli/restore"
"github.com/heptio/ark/pkg/cmd/cli/schedule"
"github.com/heptio/ark/pkg/cmd/daemonset"
"github.com/heptio/ark/pkg/cmd/server"
runplugin "github.com/heptio/ark/pkg/cmd/server/plugin"
"github.com/heptio/ark/pkg/cmd/version"
@ -67,6 +69,8 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.`
delete.NewCommand(f),
cliclient.NewCommand(),
completion.NewCommand(),
daemonset.NewCommand(f),
restic.NewCommand(f),
)
// add the glog flags

View File

@ -0,0 +1,122 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
import (
"crypto/rand"
"io/ioutil"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/restic"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kclientset "k8s.io/client-go/kubernetes"
)
// NewInitRepositoryCommand returns the "init-repository" cobra command, which
// creates an encryption key for a restic repository.
func NewInitRepositoryCommand(f client.Factory) *cobra.Command {
	o := NewInitRepositoryOptions()

	command := &cobra.Command{
		Use:   "init-repository",
		Short: "create an encryption key for a restic repository",
		Long:  "create an encryption key for a restic repository",
		Run: func(c *cobra.Command, args []string) {
			// standard options pipeline: resolve inputs, validate, execute.
			cmd.CheckError(o.Complete(f))
			cmd.CheckError(o.Validate(f))
			cmd.CheckError(o.Run(f))
		},
	}

	o.BindFlags(command.Flags())

	return command
}
// InitRepositoryOptions holds the user-supplied inputs and resolved state for
// the "ark restic init-repository" command.
type InitRepositoryOptions struct {
	// Namespace is the namespace the key will be created in (taken from the
	// client factory in Complete).
	Namespace string
	// KeyFile is an optional path to a file containing the encryption key.
	KeyFile string
	// KeyData is an optional literal encryption key. Mutually exclusive with
	// KeyFile.
	KeyData string
	// KeySize is the size in bytes of the randomly generated key, used when
	// neither KeyFile nor KeyData is provided.
	KeySize int

	// kubeClient is set during Validate.
	kubeClient kclientset.Interface
	// keyBytes is the resolved key material, set during Complete.
	keyBytes []byte
}
// NewInitRepositoryOptions returns an InitRepositoryOptions with the default
// size for a generated key.
func NewInitRepositoryOptions() *InitRepositoryOptions {
	o := new(InitRepositoryOptions)
	o.KeySize = 1024
	return o
}
// BindFlags registers the init-repository command's flags on the given flag
// set. --key-file and --key-data are mutually exclusive; --key-size only
// applies when neither is set.
func (o *InitRepositoryOptions) BindFlags(flags *pflag.FlagSet) {
	flags.StringVar(&o.KeyFile, "key-file", o.KeyFile, "Path to file containing the encryption key for the restic repository. Optional; if unset, Ark will generate a random key for you.")
	flags.StringVar(&o.KeyData, "key-data", o.KeyData, "Encryption key for the restic repository. Optional; if unset, Ark will generate a random key for you.")
	flags.IntVar(&o.KeySize, "key-size", o.KeySize, "Size of the generated key for the restic repository")
}
// Complete resolves the encryption key material into o.keyBytes, in order of
// precedence: --key-file contents, then --key-data, then a randomly generated
// key of --key-size bytes. It also captures the target namespace from the
// client factory.
//
// Fixes two defects in the original logic: --key-data was never copied into
// keyBytes (so Validate always failed for it), and when --key-file was given,
// the generated-key branch (guarded only by KeyData being empty) overwrote the
// key read from the file with random bytes.
func (o *InitRepositoryOptions) Complete(f client.Factory) error {
	if o.KeyFile != "" && o.KeyData != "" {
		return errors.Errorf("only one of --key-file and --key-data may be specified")
	}

	if o.KeyFile == "" && o.KeyData == "" && o.KeySize < 1 {
		return errors.Errorf("--key-size must be at least 1")
	}

	o.Namespace = f.Namespace()

	switch {
	case o.KeyFile != "":
		data, err := ioutil.ReadFile(o.KeyFile)
		if err != nil {
			return err
		}
		o.keyBytes = data
	case o.KeyData != "":
		o.keyBytes = []byte(o.KeyData)
	default:
		o.keyBytes = make([]byte, o.KeySize)
		// rand.Reader always returns a nil error
		_, _ = rand.Read(o.keyBytes)
	}

	return nil
}
// Validate confirms that key material was resolved by Complete, constructs the
// Kubernetes client, and verifies that the target namespace exists.
func (o *InitRepositoryOptions) Validate(f client.Factory) error {
	if len(o.keyBytes) == 0 {
		return errors.Errorf("keyBytes is required")
	}

	kubeClient, err := f.KubeClient()
	if err != nil {
		return err
	}
	o.kubeClient = kubeClient

	// probe the namespace so a bad target fails here rather than in Run.
	_, err = kubeClient.CoreV1().Namespaces().Get(o.Namespace, metav1.GetOptions{})
	return err
}
// Run stores the resolved encryption key via restic.NewRepositoryKey, using the
// core v1 client scoped to the Ark namespace (presumably persisted as a Secret
// — confirm against pkg/restic).
func (o *InitRepositoryOptions) Run(f client.Factory) error {
	return restic.NewRepositoryKey(o.kubeClient.CoreV1(), o.Namespace, o.keyBytes)
}

View File

@ -0,0 +1,37 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
// NewCommand returns the parent "restic" command, grouping the subcommands
// that work with restic repositories.
func NewCommand(f client.Factory) *cobra.Command {
	parent := &cobra.Command{
		Use:   "restic",
		Short: "Work with restic repositories",
		Long:  "Work with restic repositories",
	}

	parent.AddCommand(NewInitRepositoryCommand(f))

	return parent
}

View File

@ -0,0 +1,156 @@
package daemonset
import (
"context"
"fmt"
"os"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeinformers "k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"github.com/heptio/ark/pkg/buildinfo"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/signals"
"github.com/heptio/ark/pkg/controller"
clientset "github.com/heptio/ark/pkg/generated/clientset/versioned"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
"github.com/heptio/ark/pkg/util/logging"
)
// NewCommand returns the "daemonset" command, which runs the per-node Ark
// restic daemon process.
func NewCommand(f client.Factory) *cobra.Command {
	logLevelFlag := logging.LogLevelFlag(logrus.InfoLevel)

	c := &cobra.Command{
		Use:   "daemonset",
		Short: "Run the ark daemonset",
		Long:  "Run the ark daemonset",
		Run: func(c *cobra.Command, args []string) {
			logLevel := logLevelFlag.Parse()
			logrus.Infof("setting log-level to %s", strings.ToUpper(logLevel.String()))

			logger := logging.DefaultLogger(logLevel)
			logger.Infof("Starting Ark restic daemonset %s", buildinfo.FormattedGitSHA())

			// base the client user-agent on "<parent>-<command>".
			server, err := newDaemonServer(logger, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()))
			cmd.CheckError(err)

			server.run()
		},
	}

	c.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", ")))

	return c
}
// daemonServer holds the clients, informers, and lifecycle plumbing for the
// per-node restic daemon process.
type daemonServer struct {
	kubeClient          kubernetes.Interface
	arkClient           clientset.Interface
	arkInformerFactory  informers.SharedInformerFactory
	kubeInformerFactory kubeinformers.SharedInformerFactory
	// podInformer is a stand-alone informer (not from kubeInformerFactory)
	// because it is filtered to pods scheduled on this node.
	podInformer cache.SharedIndexInformer
	logger      logrus.FieldLogger

	// ctx/cancelFunc bound the lifetime of the controllers started in run().
	ctx        context.Context
	cancelFunc context.CancelFunc
}
// newDaemonServer constructs a daemonServer: Kubernetes and Ark clients built
// from the kubeconfig/in-cluster config, informer factories, a node-filtered
// pod informer, and a cancellable context. baseName is used as the client
// user-agent base.
func newDaemonServer(logger logrus.FieldLogger, baseName string) (*daemonServer, error) {
	clientConfig, err := client.Config("", "", baseName)
	if err != nil {
		return nil, err
	}

	kubeClient, err := kubernetes.NewForConfig(clientConfig)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	arkClient, err := clientset.NewForConfig(clientConfig)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	// fail fast if NODE_NAME is unset: the field selector below would become
	// "spec.nodeName=", silently matching only unscheduled pods.
	nodeName := os.Getenv("NODE_NAME")
	if nodeName == "" {
		return nil, errors.New("NODE_NAME environment variable must be set")
	}

	// use a stand-alone pod informer because we want to use a field selector to
	// filter to only pods scheduled on this node.
	podInformer := corev1informers.NewFilteredPodInformer(
		kubeClient,
		"",
		0,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(opts *metav1.ListOptions) {
			opts.FieldSelector = fmt.Sprintf("spec.nodeName=%s", nodeName)
		},
	)

	ctx, cancelFunc := context.WithCancel(context.Background())

	return &daemonServer{
		kubeClient:          kubeClient,
		arkClient:           arkClient,
		arkInformerFactory:  informers.NewFilteredSharedInformerFactory(arkClient, 0, os.Getenv("HEPTIO_ARK_NAMESPACE"), nil),
		kubeInformerFactory: kubeinformers.NewSharedInformerFactory(kubeClient, 0),
		podInformer:         podInformer,
		logger:              logger,
		ctx:                 ctx,
		cancelFunc:          cancelFunc,
	}, nil
}
// run starts the pod volume backup and restore controllers (one worker each),
// then blocks until the server's context is cancelled by a shutdown signal and
// both controllers have finished.
func (s *daemonServer) run() {
	// wire SIGINT/SIGTERM to the server's cancel function.
	signals.CancelOnShutdown(s.cancelFunc, s.logger)

	s.logger.Info("Starting controllers")

	var wg sync.WaitGroup

	backupController := controller.NewPodVolumeBackupController(
		s.logger,
		s.arkInformerFactory.Ark().V1().PodVolumeBackups(),
		s.arkClient.ArkV1(),
		s.podInformer,
		s.kubeInformerFactory.Core().V1().Secrets(),
		s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(),
		os.Getenv("NODE_NAME"),
	)
	wg.Add(1)
	go func() {
		defer wg.Done()
		backupController.Run(s.ctx, 1)
	}()

	restoreController := controller.NewPodVolumeRestoreController(
		s.logger,
		s.arkInformerFactory.Ark().V1().PodVolumeRestores(),
		s.arkClient.ArkV1(),
		s.podInformer,
		s.kubeInformerFactory.Core().V1().Secrets(),
		s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(),
		os.Getenv("NODE_NAME"),
	)
	wg.Add(1)
	go func() {
		defer wg.Done()
		restoreController.Run(s.ctx, 1)
	}()

	// start the informers; they stop when the context is cancelled.
	go s.arkInformerFactory.Start(s.ctx.Done())
	go s.kubeInformerFactory.Start(s.ctx.Done())
	go s.podInformer.Run(s.ctx.Done())

	s.logger.Info("Controllers started successfully")

	<-s.ctx.Done()

	s.logger.Info("Waiting for all controllers to shut down gracefully")
	wg.Wait()
}

View File

@ -106,6 +106,8 @@ func NewCommand(f client.Factory) *cobra.Command {
action = restore.NewPodAction(logger)
case "svc":
action = restore.NewServiceAction(logger)
case "restic":
action = restore.NewResticRestoreAction(logger)
default:
logger.Fatal("Unrecognized plugin name")
}

View File

@ -22,15 +22,11 @@ import (
"fmt"
"io/ioutil"
"os"
"os/signal"
"reflect"
"sort"
"strings"
"sync"
"syscall"
"time"
"github.com/heptio/ark/pkg/buildinfo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -44,23 +40,25 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
kcorev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/backup"
"github.com/heptio/ark/pkg/buildinfo"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/flag"
"github.com/heptio/ark/pkg/cmd/util/signals"
"github.com/heptio/ark/pkg/controller"
arkdiscovery "github.com/heptio/ark/pkg/discovery"
clientset "github.com/heptio/ark/pkg/generated/clientset/versioned"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
"github.com/heptio/ark/pkg/plugin"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/restore"
"github.com/heptio/ark/pkg/util/kube"
"github.com/heptio/ark/pkg/util/logging"
@ -69,9 +67,8 @@ import (
func NewCommand() *cobra.Command {
var (
sortedLogLevels = getSortedLogLevels()
logLevelFlag = flag.NewEnum(logrus.InfoLevel.String(), sortedLogLevels...)
pluginDir = "/plugins"
logLevelFlag = logging.LogLevelFlag(logrus.InfoLevel)
pluginDir = "/plugins"
)
var command = &cobra.Command{
@ -79,19 +76,10 @@ func NewCommand() *cobra.Command {
Short: "Run the ark server",
Long: "Run the ark server",
Run: func(c *cobra.Command, args []string) {
logLevel := logrus.InfoLevel
if parsed, err := logrus.ParseLevel(logLevelFlag.String()); err == nil {
logLevel = parsed
} else {
// This should theoretically never happen assuming the enum flag
// is constructed correctly because the enum flag will not allow
// an invalid value to be set.
logrus.Errorf("log-level flag has invalid value %s", strings.ToUpper(logLevelFlag.String()))
}
logLevel := logLevelFlag.Parse()
logrus.Infof("setting log-level to %s", strings.ToUpper(logLevel.String()))
logger := newLogger(logLevel, &logging.ErrorLocationHook{}, &logging.LogLocationHook{})
logger := logging.DefaultLogger(logLevel)
logger.Infof("Starting Ark server %s", buildinfo.FormattedGitSHA())
// NOTE: the namespace flag is bound to ark's persistent flags when the root ark command
@ -109,14 +97,13 @@ func NewCommand() *cobra.Command {
namespace := getServerNamespace(namespaceFlag)
s, err := newServer(namespace, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()), pluginDir, logger)
cmd.CheckError(err)
cmd.CheckError(s.run())
},
}
command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(sortedLogLevels, ", ")))
command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", ")))
command.Flags().StringVar(&pluginDir, "plugin-dir", pluginDir, "directory containing Ark plugins")
return command
@ -136,42 +123,12 @@ func getServerNamespace(namespaceFlag *pflag.Flag) string {
return api.DefaultNamespace
}
// newLogger returns a logrus logger at the given level with the provided
// hooks attached.
func newLogger(level logrus.Level, hooks ...logrus.Hook) *logrus.Logger {
	l := logrus.New()
	l.Level = level
	for i := range hooks {
		l.Hooks.Add(hooks[i])
	}
	return l
}
// getSortedLogLevels returns a string slice containing all of the valid logrus
// log levels (based on logrus.AllLevels), sorted in ascending order of severity.
func getSortedLogLevels() []string {
	levels := make([]logrus.Level, len(logrus.AllLevels))
	copy(levels, logrus.AllLevels)

	// logrus.Panic has the lowest numeric value, so sorting by ">" yields
	// ascending severity.
	sort.Slice(levels, func(i, j int) bool { return levels[i] > levels[j] })

	names := make([]string, 0, len(levels))
	for _, level := range levels {
		names = append(names, level.String())
	}

	return names
}
type server struct {
namespace string
kubeClientConfig *rest.Config
kubeClient kubernetes.Interface
arkClient clientset.Interface
objectStore cloudprovider.ObjectStore
backupService cloudprovider.BackupService
snapshotService cloudprovider.SnapshotService
discoveryClient discovery.DiscoveryInterface
@ -181,6 +138,7 @@ type server struct {
cancelFunc context.CancelFunc
logger logrus.FieldLogger
pluginManager plugin.Manager
resticManager restic.RepositoryManager
}
func newServer(namespace, baseName, pluginDir string, logger *logrus.Logger) (*server, error) {
@ -225,7 +183,8 @@ func newServer(namespace, baseName, pluginDir string, logger *logrus.Logger) (*s
func (s *server) run() error {
defer s.pluginManager.CleanupClients()
s.handleShutdownSignals()
signals.CancelOnShutdown(s.cancelFunc, s.logger)
if err := s.ensureArkNamespace(); err != nil {
return err
@ -251,6 +210,21 @@ func (s *server) run() error {
return err
}
if config.BackupStorageProvider.ResticLocation != "" {
if err := s.initRestic(config.BackupStorageProvider); err != nil {
return err
}
s.runResticMaintenance()
// warn if restic daemonset does not exist
_, err := s.kubeClient.AppsV1().DaemonSets(s.namespace).Get("restic", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
s.logger.Warn("Ark restic DaemonSet not found; restic backups will fail until it's created")
} else if err != nil {
return errors.WithStack(err)
}
}
if err := s.runControllers(config); err != nil {
return err
}
@ -258,6 +232,20 @@ func (s *server) run() error {
return nil
}
// runResticMaintenance starts a background goroutine that prunes all restic
// repositories once per hour, beginning one interval after startup.
//
// Unlike the original (time.After + wait.Forever), the goroutine observes
// server shutdown: both the initial delay and the periodic loop stop when
// s.ctx is cancelled, so the maintenance goroutine does not outlive the server.
func (s *server) runResticMaintenance() {
	go func() {
		interval := time.Hour

		// delay the first prune by one interval, but bail out on shutdown.
		select {
		case <-s.ctx.Done():
			return
		case <-time.After(interval):
		}

		// wait.Until runs immediately, then every interval, until the channel
		// closes — same cadence as wait.Forever, plus cancellation.
		wait.Until(func() {
			if err := s.resticManager.PruneAllRepos(); err != nil {
				s.logger.WithError(err).Error("error pruning repos")
			}
		}, interval, s.ctx.Done())
	}()
}
func (s *server) ensureArkNamespace() error {
logContext := s.logger.WithField("namespace", s.namespace)
@ -301,11 +289,21 @@ func (s *server) loadConfig() (*api.Config, error) {
}
const (
defaultGCSyncPeriod = 60 * time.Minute
defaultBackupSyncPeriod = 60 * time.Minute
defaultScheduleSyncPeriod = time.Minute
defaultGCSyncPeriod = 60 * time.Minute
defaultBackupSyncPeriod = 60 * time.Minute
defaultScheduleSyncPeriod = time.Minute
defaultPodVolumeOperationTimeout = 60 * time.Minute
)
// - Namespaces go first because all namespaced resources depend on them.
// - PVs go before PVCs because PVCs depend on them.
// - PVCs go before pods or controllers so they can be mounted as volumes.
// - Secrets and config maps go before pods or controllers so they can be mounted
// as volumes.
// - Service accounts go before pods or controllers so pods can use them.
// - Limit ranges go before pods or controllers so pods can use them.
// - Pods go before controllers so they can be explicitly restored and potentially
// have restic restores run before controllers adopt the pods.
var defaultResourcePriorities = []string{
"namespaces",
"persistentvolumes",
@ -314,6 +312,7 @@ var defaultResourcePriorities = []string{
"configmaps",
"serviceaccounts",
"limitranges",
"pods",
}
func applyConfigDefaults(c *api.Config, logger logrus.FieldLogger) {
@ -329,6 +328,10 @@ func applyConfigDefaults(c *api.Config, logger logrus.FieldLogger) {
c.ScheduleSyncPeriod.Duration = defaultScheduleSyncPeriod
}
if c.PodVolumeOperationTimeout.Duration == 0 {
c.PodVolumeOperationTimeout.Duration = defaultPodVolumeOperationTimeout
}
if len(c.ResourcePriorities) == 0 {
c.ResourcePriorities = defaultResourcePriorities
logger.WithField("priorities", c.ResourcePriorities).Info("Using default resource priorities")
@ -379,17 +382,6 @@ func (s *server) watchConfig(config *api.Config) {
})
}
// handleShutdownSignals arranges for the server's context to be canceled
// (via s.cancelFunc) when the process receives SIGINT or SIGTERM, allowing
// a graceful shutdown.
func (s *server) handleShutdownSignals() {
	shutdownCh := make(chan os.Signal, 1)
	signal.Notify(shutdownCh, syscall.SIGINT, syscall.SIGTERM)

	go func() {
		received := <-shutdownCh
		s.logger.Infof("Received signal %s, gracefully shutting down", received)
		s.cancelFunc()
	}()
}
func (s *server) initBackupService(config *api.Config) error {
s.logger.Info("Configuring cloud provider for backup service")
objectStore, err := getObjectStore(config.BackupStorageProvider.CloudProviderConfig, s.pluginManager)
@ -397,6 +389,7 @@ func (s *server) initBackupService(config *api.Config) error {
return err
}
s.objectStore = objectStore
s.backupService = cloudprovider.NewBackupService(objectStore, s.logger)
return nil
}
@ -457,6 +450,42 @@ func durationMin(a, b time.Duration) time.Duration {
return b
}
// initRestic prepares the server for restic-based pod volume backups: it
// sets provider-specific environment variables for restic credentials,
// starts a secrets informer filtered to the restic credentials secret,
// constructs the RepositoryManager, and checks all existing repositories.
// Returns an error if the manager cannot be built or a repo check fails.
func (s *server) initRestic(config api.ObjectStorageProviderConfig) error {
	// set the env vars that restic uses for creds purposes
	if config.Name == string(restic.AzureBackend) {
		// Bridge Ark's Azure env var names to the ones restic presumably
		// reads for its Azure backend — TODO confirm against restic docs.
		os.Setenv("AZURE_ACCOUNT_NAME", os.Getenv("AZURE_STORAGE_ACCOUNT_ID"))
		os.Setenv("AZURE_ACCOUNT_KEY", os.Getenv("AZURE_STORAGE_KEY"))
	}

	// Watch secrets in all namespaces, but only those whose name matches the
	// restic credentials secret, so we don't cache every secret in the cluster.
	secretsInformer := corev1informers.NewFilteredSecretInformer(
		s.kubeClient,
		"",
		0,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(opts *metav1.ListOptions) {
			opts.FieldSelector = fmt.Sprintf("metadata.name=%s", restic.CredentialsSecretName)
		},
	)
	// The informer stops when the server's context is canceled.
	go secretsInformer.Run(s.ctx.Done())

	res, err := restic.NewRepositoryManager(
		s.ctx,
		s.objectStore,
		config,
		s.arkClient,
		secretsInformer,
		s.kubeClient.CoreV1(),
		s.logger,
	)
	if err != nil {
		return err
	}
	s.resticManager = res

	s.logger.Info("Checking restic repositories")
	return s.resticManager.CheckAllRepos()
}
func (s *server) runControllers(config *api.Config) error {
s.logger.Info("Starting controllers")
@ -505,8 +534,16 @@ func (s *server) runControllers(config *api.Config) error {
} else {
backupTracker := controller.NewBackupTracker()
backupper, err := newBackupper(discoveryHelper, s.clientPool, s.backupService, s.snapshotService, s.kubeClientConfig, s.kubeClient.CoreV1())
backupper, err := backup.NewKubernetesBackupper(
discoveryHelper,
client.NewDynamicFactory(s.clientPool),
podexec.NewPodCommandExecutor(s.kubeClientConfig, s.kubeClient.CoreV1().RESTClient()),
s.snapshotService,
s.resticManager,
config.PodVolumeOperationTimeout.Duration,
)
cmd.CheckError(err)
backupController := controller.NewBackupController(
s.sharedInformerFactory.Ark().V1().Backups(),
s.arkClient.ArkV1(),
@ -561,6 +598,8 @@ func (s *server) runControllers(config *api.Config) error {
s.sharedInformerFactory.Ark().V1().Restores(),
s.arkClient.ArkV1(), // restoreClient
backupTracker,
s.resticManager,
s.sharedInformerFactory.Ark().V1().PodVolumeBackups(),
)
wg.Add(1)
go func() {
@ -570,14 +609,16 @@ func (s *server) runControllers(config *api.Config) error {
}
restorer, err := newRestorer(
restorer, err := restore.NewKubernetesRestorer(
discoveryHelper,
s.clientPool,
client.NewDynamicFactory(s.clientPool),
s.backupService,
s.snapshotService,
config.ResourcePriorities,
s.arkClient.ArkV1(),
s.kubeClient,
s.kubeClient.CoreV1().Namespaces(),
s.resticManager,
config.PodVolumeOperationTimeout.Duration,
s.logger,
)
cmd.CheckError(err)
@ -670,41 +711,3 @@ func (s *server) removeDeprecatedGCFinalizer() {
}
}
}
// newBackupper wires together the dependencies needed to construct a
// Kubernetes backupper.
// NOTE(review): the backupService parameter is not used by this function.
func newBackupper(
	discoveryHelper arkdiscovery.Helper,
	clientPool dynamic.ClientPool,
	backupService cloudprovider.BackupService,
	snapshotService cloudprovider.SnapshotService,
	kubeClientConfig *rest.Config,
	kubeCoreV1Client kcorev1client.CoreV1Interface,
) (backup.Backupper, error) {
	dynamicFactory := client.NewDynamicFactory(clientPool)
	podCommandExecutor := backup.NewPodCommandExecutor(kubeClientConfig, kubeCoreV1Client.RESTClient())

	return backup.NewKubernetesBackupper(
		discoveryHelper,
		dynamicFactory,
		podCommandExecutor,
		snapshotService,
	)
}
// newRestorer wires together the dependencies needed to construct a
// Kubernetes restorer.
func newRestorer(
	discoveryHelper arkdiscovery.Helper,
	clientPool dynamic.ClientPool,
	backupService cloudprovider.BackupService,
	snapshotService cloudprovider.SnapshotService,
	resourcePriorities []string,
	backupClient arkv1client.BackupsGetter,
	kubeClient kubernetes.Interface,
	logger logrus.FieldLogger,
) (restore.Restorer, error) {
	dynamicFactory := client.NewDynamicFactory(clientPool)
	namespaceClient := kubeClient.CoreV1().Namespaces()

	return restore.NewKubernetesRestorer(
		discoveryHelper,
		dynamicFactory,
		backupService,
		snapshotService,
		resourcePriorities,
		backupClient,
		namespaceClient,
		logger,
	)
}

View File

@ -18,15 +18,13 @@ package flag
import (
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/sets"
)
// Enum is a Cobra-compatible wrapper for defining
// a string flag that can be one of a specified set
// of values.
type Enum struct {
allowedValues sets.String
allowedValues []string
value string
}
@ -35,7 +33,7 @@ type Enum struct {
// none is set.
func NewEnum(defaultValue string, allowedValues ...string) *Enum {
return &Enum{
allowedValues: sets.NewString(allowedValues...),
allowedValues: allowedValues,
value: defaultValue,
}
}
@ -50,12 +48,14 @@ func (e *Enum) String() string {
// receiver. It returns an error if the string
// is not an allowed value.
func (e *Enum) Set(s string) error {
if !e.allowedValues.Has(s) {
return errors.Errorf("invalid value: %q", s)
for _, val := range e.allowedValues {
if val == s {
e.value = s
return nil
}
}
e.value = s
return nil
return errors.Errorf("invalid value: %q", s)
}
// Type returns a string representation of the
@ -66,3 +66,9 @@ func (e *Enum) Type() string {
// the possible options.
return ""
}
// AllowedValues returns a copy of the flag's valid values. A copy is
// returned so callers cannot mutate the Enum's internal state (the
// original returned the internal slice directly, allowing a caller's
// append/assignment to corrupt validation).
func (e *Enum) AllowedValues() []string {
	vals := make([]string, len(e.allowedValues))
	copy(vals, e.allowedValues)
	return vals
}

View File

@ -0,0 +1,39 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package signals
import (
"context"
"os"
"os/signal"
"syscall"
"github.com/sirupsen/logrus"
)
// CancelOnShutdown starts a goroutine that will call cancelFunc when
// either SIGINT or SIGTERM is received.
func CancelOnShutdown(cancelFunc context.CancelFunc, logger logrus.FieldLogger) {
	shutdownCh := make(chan os.Signal, 1)
	signal.Notify(shutdownCh, syscall.SIGINT, syscall.SIGTERM)

	go func() {
		received := <-shutdownCh
		logger.Infof("Received signal %s, shutting down", received)
		cancelFunc()
	}()
}

View File

@ -27,6 +27,7 @@ import (
arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/kube"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -50,6 +51,8 @@ type backupDeletionController struct {
restoreLister listers.RestoreLister
restoreClient arkv1client.RestoresGetter
backupTracker BackupTracker
resticMgr restic.RepositoryManager
podvolumeBackupLister listers.PodVolumeBackupLister
processRequestFunc func(*v1.DeleteBackupRequest) error
clock clock.Clock
@ -67,6 +70,8 @@ func NewBackupDeletionController(
restoreInformer informers.RestoreInformer,
restoreClient arkv1client.RestoresGetter,
backupTracker BackupTracker,
resticMgr restic.RepositoryManager,
podvolumeBackupInformer informers.PodVolumeBackupInformer,
) Interface {
c := &backupDeletionController{
genericController: newGenericController("backup-deletion", logger),
@ -79,11 +84,18 @@ func NewBackupDeletionController(
restoreLister: restoreInformer.Lister(),
restoreClient: restoreClient,
backupTracker: backupTracker,
clock: &clock.RealClock{},
resticMgr: resticMgr,
podvolumeBackupLister: podvolumeBackupInformer.Lister(),
clock: &clock.RealClock{},
}
c.syncHandler = c.processQueueItem
c.cacheSyncWaiters = append(c.cacheSyncWaiters, deleteBackupRequestInformer.Informer().HasSynced, restoreInformer.Informer().HasSynced)
c.cacheSyncWaiters = append(
c.cacheSyncWaiters,
deleteBackupRequestInformer.Informer().HasSynced,
restoreInformer.Informer().HasSynced,
podvolumeBackupInformer.Informer().HasSynced,
)
c.processRequestFunc = c.processRequest
deleteBackupRequestInformer.Informer().AddEventHandler(
@ -225,6 +237,18 @@ func (c *backupDeletionController) processRequest(req *v1.DeleteBackupRequest) e
}
}
// Try to delete restic snapshots
log.Info("Removing restic snapshots")
if snapshots, err := restic.GetSnapshotsInBackup(backup, c.podvolumeBackupLister); err != nil {
errs = append(errs, err.Error())
} else {
for _, snapshot := range snapshots {
if err := c.resticMgr.Forget(snapshot); err != nil {
errs = append(errs, err.Error())
}
}
}
// Try to delete backup from object storage
log.Info("Removing backup from object storage")
if err := c.backupService.DeleteBackupDir(c.bucket, backup.Name); err != nil {

View File

@ -64,6 +64,8 @@ func TestBackupDeletionControllerControllerHasUpdateFunc(t *testing.T) {
sharedInformers.Ark().V1().Restores(),
client.ArkV1(), // restoreClient
NewBackupTracker(),
nil, // restic repository manager
sharedInformers.Ark().V1().PodVolumeBackups(),
).(*backupDeletionController)
// disable resync handler since we don't want to test it here
@ -117,6 +119,8 @@ func TestBackupDeletionControllerProcessQueueItem(t *testing.T) {
sharedInformers.Ark().V1().Restores(),
client.ArkV1(), // restoreClient
NewBackupTracker(),
nil, // restic repository manager
sharedInformers.Ark().V1().PodVolumeBackups(),
).(*backupDeletionController)
// Error splitting key
@ -198,6 +202,8 @@ func setupBackupDeletionControllerTest(objects ...runtime.Object) *backupDeletio
sharedInformers.Ark().V1().Restores(),
client.ArkV1(), // restoreClient
NewBackupTracker(),
nil, // restic repository manager
sharedInformers.Ark().V1().PodVolumeBackups(),
).(*backupDeletionController),
req: req,
@ -577,6 +583,8 @@ func TestBackupDeletionControllerDeleteExpiredRequests(t *testing.T) {
sharedInformers.Ark().V1().Restores(),
client.ArkV1(), // restoreClient
NewBackupTracker(),
nil,
sharedInformers.Ark().V1().PodVolumeBackups(),
).(*backupDeletionController)
fakeClock := &clock.FakeClock{}

View File

@ -0,0 +1,299 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
corev1informers "k8s.io/client-go/informers/core/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
arkv1api "github.com/heptio/ark/pkg/apis/ark/v1"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/kube"
)
type podVolumeBackupController struct {
*genericController
podVolumeBackupClient arkv1client.PodVolumeBackupsGetter
podVolumeBackupLister listers.PodVolumeBackupLister
secretLister corev1listers.SecretLister
podLister corev1listers.PodLister
pvcLister corev1listers.PersistentVolumeClaimLister
nodeName string
processBackupFunc func(*arkv1api.PodVolumeBackup) error
}
// NewPodVolumeBackupController creates a new pod volume backup controller.
//
// The controller watches PodVolumeBackup resources and, for new items whose
// Spec.Node matches nodeName, runs a restic backup of the referenced pod
// volume. The pod, secret, and PVC informers supply the data needed to run
// the backup (volume directory resolution and repository credentials).
func NewPodVolumeBackupController(
	logger logrus.FieldLogger,
	podVolumeBackupInformer informers.PodVolumeBackupInformer,
	podVolumeBackupClient arkv1client.PodVolumeBackupsGetter,
	podInformer cache.SharedIndexInformer,
	secretInformer corev1informers.SecretInformer,
	pvcInformer corev1informers.PersistentVolumeClaimInformer,
	nodeName string,
) Interface {
	c := &podVolumeBackupController{
		genericController:     newGenericController("pod-volume-backup", logger),
		podVolumeBackupClient: podVolumeBackupClient,
		podVolumeBackupLister: podVolumeBackupInformer.Lister(),
		podLister:             corev1listers.NewPodLister(podInformer.GetIndexer()),
		secretLister:          secretInformer.Lister(),
		pvcLister:             pvcInformer.Lister(),
		nodeName:              nodeName,
	}

	c.syncHandler = c.processQueueItem
	// Block processing until every backing cache has synced.
	c.cacheSyncWaiters = append(
		c.cacheSyncWaiters,
		podVolumeBackupInformer.Informer().HasSynced,
		secretInformer.Informer().HasSynced,
		podInformer.HasSynced,
		pvcInformer.Informer().HasSynced,
	)
	c.processBackupFunc = c.processBackup

	// Enqueue on add and update; processQueueItem filters out non-new items.
	podVolumeBackupInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc:    c.enqueue,
			UpdateFunc: func(_, obj interface{}) { c.enqueue(obj) },
		},
	)

	return c
}
// processQueueItem resolves a queue key to a PodVolumeBackup and, if it is
// a new item assigned to this controller's node, hands a deep copy of it to
// processBackupFunc. Returning nil drops the key from the work queue.
func (c *podVolumeBackupController) processQueueItem(key string) error {
	log := c.logger.WithField("key", key)
	log.Debug("Running processItem")

	ns, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		// An unparseable key can never succeed; log it and drop it.
		log.WithError(err).Error("error splitting queue key")
		return nil
	}

	req, err := c.podVolumeBackupLister.PodVolumeBackups(ns).Get(name)
	if apierrors.IsNotFound(err) {
		log.Debug("Unable to find PodVolumeBackup")
		return nil
	}
	if err != nil {
		return errors.Wrap(err, "error getting PodVolumeBackup")
	}

	// only process new items
	if req.Status.Phase != "" && req.Status.Phase != arkv1api.PodVolumeBackupPhaseNew {
		return nil
	}

	// only process items for this node
	if req.Spec.Node != c.nodeName {
		return nil
	}

	// Don't mutate the shared cache
	return c.processBackupFunc(req.DeepCopy())
}
// processBackup performs the restic backup described by req: it marks the
// item InProgress, resolves the volume's on-host path, writes temporary
// restic credentials, runs `restic backup`, looks up the resulting snapshot
// ID, and records the path and snapshot ID with phase Completed. Any
// failure after the InProgress patch marks the item Failed via c.fail.
func (c *podVolumeBackupController) processBackup(req *arkv1api.PodVolumeBackup) error {
	log := c.logger.WithFields(logrus.Fields{
		"namespace": req.Namespace,
		"name":      req.Name,
	})

	var err error

	// update status to InProgress
	req, err = c.patchPodVolumeBackup(req, updatePhaseFunc(arkv1api.PodVolumeBackupPhaseInProgress))
	if err != nil {
		log.WithError(err).Error("Error setting phase to InProgress")
		return errors.WithStack(err)
	}

	pod, err := c.podLister.Pods(req.Spec.Pod.Namespace).Get(req.Spec.Pod.Name)
	if err != nil {
		log.WithError(err).Errorf("Error getting pod %s/%s", req.Spec.Pod.Namespace, req.Spec.Pod.Name)
		return c.fail(req, errors.Wrap(err, "error getting pod").Error(), log)
	}

	volumeDir, err := kube.GetVolumeDirectory(pod, req.Spec.Volume, c.pvcLister)
	if err != nil {
		log.WithError(err).Error("Error getting volume directory name")
		return c.fail(req, errors.Wrap(err, "error getting volume directory name").Error(), log)
	}

	// Resolve the volume's directory under the kubelet pods dir, which is
	// mounted into this daemonset pod at /host_pods. The plugin segment of
	// the path varies, hence the glob; exactly one match is required.
	path, err := singlePathMatch(fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(req.Spec.Pod.UID), volumeDir))
	if err != nil {
		log.WithError(err).Error("Error uniquely identifying volume path")
		return c.fail(req, errors.Wrap(err, "error getting volume path on host").Error(), log)
	}

	// temp creds
	file, err := restic.TempCredentialsFile(c.secretLister, req.Spec.Pod.Namespace)
	if err != nil {
		log.WithError(err).Error("Error creating temp restic credentials file")
		return c.fail(req, errors.Wrap(err, "error creating temp restic credentials file").Error(), log)
	}
	// ignore error since there's nothing we can do and it's a temp file.
	defer os.Remove(file)

	resticCmd := restic.BackupCommand(
		req.Spec.RepoPrefix,
		req.Spec.Pod.Namespace,
		file,
		path,
		req.Spec.Tags,
	)

	var stdout, stderr string

	if stdout, stderr, err = runCommand(resticCmd.Cmd()); err != nil {
		log.WithError(errors.WithStack(err)).Errorf("Error running command=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr)
		return c.fail(req, fmt.Sprintf("error running restic backup, stderr=%s: %s", stderr, err.Error()), log)
	}
	log.Debugf("Ran command=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr)

	// The snapshot ID isn't emitted by the backup command itself, so query
	// the repo for it (matching on the backup's tags).
	snapshotID, err := restic.GetSnapshotID(req.Spec.RepoPrefix, req.Spec.Pod.Namespace, file, req.Spec.Tags)
	if err != nil {
		log.WithError(err).Error("Error getting SnapshotID")
		return c.fail(req, errors.Wrap(err, "error getting snapshot id").Error(), log)
	}

	// update status to Completed with path & snapshot id
	req, err = c.patchPodVolumeBackup(req, func(r *arkv1api.PodVolumeBackup) {
		r.Status.Path = path
		r.Status.SnapshotID = snapshotID
		r.Status.Phase = arkv1api.PodVolumeBackupPhaseCompleted
	})
	if err != nil {
		log.WithError(err).Error("Error setting phase to Completed")
		return err
	}

	return nil
}
// runCommand runs a command and returns its stdout, stderr, and its returned
// error (if any). If there are errors reading stdout or stderr, their return
// value(s) will contain the error as a string.
func runCommand(cmd *exec.Cmd) (string, string, error) {
stdoutBuf := new(bytes.Buffer)
stderrBuf := new(bytes.Buffer)
cmd.Stdout = stdoutBuf
cmd.Stderr = stderrBuf
runErr := cmd.Run()
var stdout, stderr string
if res, readErr := ioutil.ReadAll(stdoutBuf); readErr != nil {
stdout = errors.Wrap(readErr, "error reading command's stdout").Error()
} else {
stdout = string(res)
}
if res, readErr := ioutil.ReadAll(stderrBuf); readErr != nil {
stderr = errors.Wrap(readErr, "error reading command's stderr").Error()
} else {
stderr = string(res)
}
return stdout, stderr, runErr
}
// patchPodVolumeBackup applies mutate to req and persists the change as a
// JSON merge patch computed from the before/after serializations, returning
// the server's resulting PodVolumeBackup. req is mutated in place by mutate.
func (c *podVolumeBackupController) patchPodVolumeBackup(req *arkv1api.PodVolumeBackup, mutate func(*arkv1api.PodVolumeBackup)) (*arkv1api.PodVolumeBackup, error) {
	// Record original json
	oldData, err := json.Marshal(req)
	if err != nil {
		return nil, errors.Wrap(err, "error marshalling original PodVolumeBackup")
	}

	// Mutate
	mutate(req)

	// Record new json
	newData, err := json.Marshal(req)
	if err != nil {
		return nil, errors.Wrap(err, "error marshalling updated PodVolumeBackup")
	}

	patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
	if err != nil {
		return nil, errors.Wrap(err, "error creating json merge patch for PodVolumeBackup")
	}

	req, err = c.podVolumeBackupClient.PodVolumeBackups(req.Namespace).Patch(req.Name, types.MergePatchType, patchBytes)
	if err != nil {
		return nil, errors.Wrap(err, "error patching PodVolumeBackup")
	}

	return req, nil
}
// fail marks req as Failed with the given message. It returns nil when the
// patch succeeds (so callers can `return c.fail(...)` without requeueing),
// or the patch error otherwise.
func (c *podVolumeBackupController) fail(req *arkv1api.PodVolumeBackup, msg string, log logrus.FieldLogger) error {
	setFailed := func(r *arkv1api.PodVolumeBackup) {
		r.Status.Phase = arkv1api.PodVolumeBackupPhaseFailed
		r.Status.Message = msg
	}

	_, err := c.patchPodVolumeBackup(req, setFailed)
	if err != nil {
		log.WithError(err).Error("Error setting phase to Failed")
	}
	return err
}
// updatePhaseFunc returns a mutator, suitable for patchPodVolumeBackup,
// that sets a PodVolumeBackup's status phase.
func updatePhaseFunc(phase arkv1api.PodVolumeBackupPhase) func(r *arkv1api.PodVolumeBackup) {
	return func(pvb *arkv1api.PodVolumeBackup) {
		pvb.Status.Phase = phase
	}
}
// singlePathMatch expands the glob pattern path and returns the unique
// match. It returns an error if the pattern is malformed or does not match
// exactly one path; the pattern is included in the message so failures can
// be diagnosed (the original message omitted it).
func singlePathMatch(path string) (string, error) {
	matches, err := filepath.Glob(path)
	if err != nil {
		return "", errors.WithStack(err)
	}

	if len(matches) != 1 {
		return "", errors.Errorf("expected one matching path for %q, got %d", path, len(matches))
	}

	return matches[0], nil
}

View File

@ -0,0 +1,356 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"strings"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1api "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
corev1informers "k8s.io/client-go/informers/core/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
arkv1api "github.com/heptio/ark/pkg/apis/ark/v1"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/boolptr"
"github.com/heptio/ark/pkg/util/kube"
)
type podVolumeRestoreController struct {
*genericController
podVolumeRestoreClient arkv1client.PodVolumeRestoresGetter
podVolumeRestoreLister listers.PodVolumeRestoreLister
secretLister corev1listers.SecretLister
podLister corev1listers.PodLister
pvcLister corev1listers.PersistentVolumeClaimLister
nodeName string
processRestoreFunc func(*arkv1api.PodVolumeRestore) error
}
// NewPodVolumeRestoreController creates a new pod volume restore controller.
//
// The controller watches both PodVolumeRestore resources and pods: a
// restore only becomes processable once its target pod is scheduled on
// this node and the restic init container is observed, so pod events can
// make previously-skipped restores eligible for enqueueing.
func NewPodVolumeRestoreController(
	logger logrus.FieldLogger,
	podVolumeRestoreInformer informers.PodVolumeRestoreInformer,
	podVolumeRestoreClient arkv1client.PodVolumeRestoresGetter,
	podInformer cache.SharedIndexInformer,
	secretInformer corev1informers.SecretInformer,
	pvcInformer corev1informers.PersistentVolumeClaimInformer,
	nodeName string,
) Interface {
	c := &podVolumeRestoreController{
		genericController:      newGenericController("pod-volume-restore", logger),
		podVolumeRestoreClient: podVolumeRestoreClient,
		podVolumeRestoreLister: podVolumeRestoreInformer.Lister(),
		podLister:              corev1listers.NewPodLister(podInformer.GetIndexer()),
		secretLister:           secretInformer.Lister(),
		pvcLister:              pvcInformer.Lister(),
		nodeName:               nodeName,
	}

	c.syncHandler = c.processQueueItem
	// Block processing until every backing cache has synced.
	c.cacheSyncWaiters = append(
		c.cacheSyncWaiters,
		podVolumeRestoreInformer.Informer().HasSynced,
		secretInformer.Informer().HasSynced,
		podInformer.HasSynced,
		pvcInformer.Informer().HasSynced,
	)
	c.processRestoreFunc = c.processRestore

	// PodVolumeRestore events are filtered through pvrHandler, which only
	// enqueues items that are ready to be processed on this node.
	podVolumeRestoreInformer.Informer().AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: c.pvrHandler,
			UpdateFunc: func(_, obj interface{}) {
				c.pvrHandler(obj)
			},
		},
	)

	// Pod events re-trigger any restores associated with the pod.
	podInformer.AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			AddFunc: c.podHandler,
			UpdateFunc: func(_, obj interface{}) {
				c.podHandler(obj)
			},
		},
	)

	return c
}
// pvrHandler enqueues a PodVolumeRestore event if it is ready to be
// processed by this node's controller.
func (c *podVolumeRestoreController) pvrHandler(obj interface{}) {
	pvr := obj.(*arkv1api.PodVolumeRestore)
	log := c.logger.WithField("key", kube.NamespaceAndName(pvr))

	if shouldEnqueuePVR(pvr, c.podLister, c.nodeName, log) {
		log.Debug("enqueueing")
		c.enqueue(obj)
	}
}
// podHandler enqueues any PodVolumeRestores that become processable as a
// result of a pod event.
func (c *podVolumeRestoreController) podHandler(obj interface{}) {
	pod := obj.(*corev1api.Pod)
	log := c.logger.WithField("key", kube.NamespaceAndName(pod))

	pending := pvrsToEnqueueForPod(pod, c.podVolumeRestoreLister, c.nodeName, log)
	for _, pvr := range pending {
		c.enqueue(pvr)
	}
}
// shouldProcessPod returns whether a PodVolumeRestore for the given pod
// should be processed by this node's controller: the pod must be scheduled
// on this node and its restic init container must currently be running
// (the restore appears to be performed while that init container blocks
// pod startup — see processRestore and complete-restore.sh).
func shouldProcessPod(pod *corev1api.Pod, nodeName string, log logrus.FieldLogger) bool {
	// if the pod lister being used is filtered to pods on this node, this is superfluous,
	// but retaining for safety.
	if pod.Spec.NodeName != nodeName {
		log.Debugf("Pod is scheduled on node %s, not enqueueing.", pod.Spec.NodeName)
		return false
	}

	// only process items for pods that have the restic initContainer running.
	// BUG FIX: isPodWaiting returns true when the restic init container is
	// NOT yet running, so the previous `!isPodWaiting(pod)` check was
	// inverted relative to this comment and the log message below.
	if isPodWaiting(pod) {
		log.Debugf("Pod is not running restic initContainer, not enqueueing.")
		return false
	}

	return true
}
// shouldProcessPVR reports whether pvr is still new (i.e. has not started
// processing).
func shouldProcessPVR(pvr *arkv1api.PodVolumeRestore, log logrus.FieldLogger) bool {
	switch pvr.Status.Phase {
	case "", arkv1api.PodVolumeRestorePhaseNew:
		// only process new items
		return true
	default:
		log.Debugf("Item has phase %s, not enqueueing.", pvr.Status.Phase)
		return false
	}
}
// pvrsToEnqueueForPod returns the new PodVolumeRestores associated with pod
// (matched via the Ark pod-UID label) that should be processed by this
// node's controller. Lookup errors are logged and result in nil (events are
// best-effort; the item will be retried on the next pod/PVR event).
func pvrsToEnqueueForPod(pod *corev1api.Pod, pvrLister listers.PodVolumeRestoreLister, nodeName string, log logrus.FieldLogger) []*arkv1api.PodVolumeRestore {
	if !shouldProcessPod(pod, nodeName, log) {
		return nil
	}

	selectorStr := fmt.Sprintf("%s=%s", arkv1api.PodUIDLabel, pod.UID)

	selector, err := labels.Parse(selectorStr)
	if err != nil {
		// BUG FIX: the original called Error with a format string, which
		// logrus does not interpolate (the literal "%s" was logged); use
		// Errorf so the selector is actually formatted into the message.
		log.WithError(err).Errorf("Unable to parse label selector %s", selectorStr)
		return nil
	}

	pvrs, err := pvrLister.List(selector)
	if err != nil {
		log.WithError(err).Error("Unable to list pod volume restores")
		return nil
	}

	var res []*arkv1api.PodVolumeRestore
	for i, pvr := range pvrs {
		if shouldProcessPVR(pvr, log) {
			res = append(res, pvrs[i])
		}
	}

	return res
}
// shouldEnqueuePVR reports whether a PodVolumeRestore should be enqueued:
// it must be new, its pod must be resolvable from the lister, and the pod
// must be processable on this node.
func shouldEnqueuePVR(pvr *arkv1api.PodVolumeRestore, podLister corev1listers.PodLister, nodeName string, log logrus.FieldLogger) bool {
	if !shouldProcessPVR(pvr, log) {
		return false
	}

	pod, err := podLister.Pods(pvr.Spec.Pod.Namespace).Get(pvr.Spec.Pod.Name)
	if err != nil {
		log.WithError(err).Errorf("Unable to get item's pod %s/%s, not enqueueing.", pvr.Spec.Pod.Namespace, pvr.Spec.Pod.Name)
		return false
	}

	return shouldProcessPod(pod, nodeName, log)
}
// isPodWaiting returns true when pod is NOT currently running the restic
// init container as its first init container — i.e. when the pod has no
// init containers, the first init container is not the restic one, no init
// container statuses have been reported yet, or the first init container's
// state is not Running. In other words, false means the restic init
// container is up and running.
func isPodWaiting(pod *corev1api.Pod) bool {
	return len(pod.Spec.InitContainers) == 0 ||
		pod.Spec.InitContainers[0].Name != restic.InitContainer ||
		len(pod.Status.InitContainerStatuses) == 0 ||
		pod.Status.InitContainerStatuses[0].State.Running == nil
}
// processQueueItem resolves a queue key to a PodVolumeRestore and hands a
// deep copy of it to processRestoreFunc. Returning nil drops the key.
func (c *podVolumeRestoreController) processQueueItem(key string) error {
	log := c.logger.WithField("key", key)
	log.Debug("Running processItem")

	ns, name, splitErr := cache.SplitMetaNamespaceKey(key)
	if splitErr != nil {
		// An unparseable key can never succeed; log it and drop it.
		log.WithError(errors.WithStack(splitErr)).Error("error splitting queue key")
		return nil
	}

	req, err := c.podVolumeRestoreLister.PodVolumeRestores(ns).Get(name)
	switch {
	case apierrors.IsNotFound(err):
		log.Debug("Unable to find PodVolumeRestore")
		return nil
	case err != nil:
		return errors.Wrap(err, "error getting PodVolumeRestore")
	}

	// Don't mutate the shared cache
	return c.processRestoreFunc(req.DeepCopy())
}
// processRestore performs the restic restore described by req: it marks the
// item InProgress, writes temporary restic credentials, runs
// `restic restore` for the snapshot, invokes /complete-restore.sh to move
// the restored data into the pod volume and signal the waiting init
// container, then marks the item Completed. Any failure after the
// InProgress patch marks the item Failed via c.fail.
func (c *podVolumeRestoreController) processRestore(req *arkv1api.PodVolumeRestore) error {
	log := c.logger.WithFields(logrus.Fields{
		"namespace": req.Namespace,
		"name":      req.Name,
	})

	var err error

	// update status to InProgress
	req, err = c.patchPodVolumeRestore(req, updatePodVolumeRestorePhaseFunc(arkv1api.PodVolumeRestorePhaseInProgress))
	if err != nil {
		log.WithError(err).Error("Error setting phase to InProgress")
		return errors.WithStack(err)
	}

	pod, err := c.podLister.Pods(req.Spec.Pod.Namespace).Get(req.Spec.Pod.Name)
	if err != nil {
		log.WithError(err).Errorf("Error getting pod %s/%s", req.Spec.Pod.Namespace, req.Spec.Pod.Name)
		return c.fail(req, errors.Wrap(err, "error getting pod").Error(), log)
	}

	volumeDir, err := kube.GetVolumeDirectory(pod, req.Spec.Volume, c.pvcLister)
	if err != nil {
		log.WithError(err).Error("Error getting volume directory name")
		return c.fail(req, errors.Wrap(err, "error getting volume directory name").Error(), log)
	}

	// temp creds
	file, err := restic.TempCredentialsFile(c.secretLister, req.Spec.Pod.Namespace)
	if err != nil {
		log.WithError(err).Error("Error creating temp restic credentials file")
		return c.fail(req, errors.Wrap(err, "error creating temp restic credentials file").Error(), log)
	}
	// ignore error since there's nothing we can do and it's a temp file.
	defer os.Remove(file)

	resticCmd := restic.RestoreCommand(
		req.Spec.RepoPrefix,
		req.Spec.Pod.Namespace,
		file,
		string(req.Spec.Pod.UID),
		req.Spec.SnapshotID,
	)

	var stdout, stderr string

	if stdout, stderr, err = runCommand(resticCmd.Cmd()); err != nil {
		log.WithError(errors.WithStack(err)).Errorf("Error running command=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr)
		return c.fail(req, fmt.Sprintf("error running restic restore, stderr=%s: %s", stderr, err.Error()), log)
	}
	log.Debugf("Ran command=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr)

	// The restore is owned (controlled) by an Ark Restore; its UID is passed
	// to the completion script so it can record which restore produced this
	// data.
	var restoreUID types.UID
	for _, owner := range req.OwnerReferences {
		if boolptr.IsSetToTrue(owner.Controller) {
			restoreUID = owner.UID
			break
		}
	}

	completeScript := strings.Join([]string{"/complete-restore.sh", string(req.Spec.Pod.UID), volumeDir, string(restoreUID)}, " ")
	cmd := exec.Command("/bin/sh", "-c", completeScript)
	if stdout, stderr, err = runCommand(cmd); err != nil {
		// BUG FIX: these log/failure messages previously referenced
		// resticCmd and said "restic restore", hiding which command failed.
		log.WithError(errors.WithStack(err)).Errorf("Error running command=%s, stdout=%s, stderr=%s", completeScript, stdout, stderr)
		return c.fail(req, fmt.Sprintf("error running restore completion script, stderr=%s: %s", stderr, err.Error()), log)
	}
	log.Debugf("Ran command=%s, stdout=%s, stderr=%s", completeScript, stdout, stderr)

	// update status to Completed
	if _, err = c.patchPodVolumeRestore(req, updatePodVolumeRestorePhaseFunc(arkv1api.PodVolumeRestorePhaseCompleted)); err != nil {
		log.WithError(err).Error("Error setting phase to Completed")
		return err
	}

	return nil
}
// patchPodVolumeRestore applies mutate to req and persists the change as a
// JSON merge patch computed from the before/after serializations, returning
// the server's resulting PodVolumeRestore. req is mutated in place by mutate.
func (c *podVolumeRestoreController) patchPodVolumeRestore(req *arkv1api.PodVolumeRestore, mutate func(*arkv1api.PodVolumeRestore)) (*arkv1api.PodVolumeRestore, error) {
	// Record original json
	oldData, err := json.Marshal(req)
	if err != nil {
		return nil, errors.Wrap(err, "error marshalling original PodVolumeRestore")
	}

	// Mutate
	mutate(req)

	// Record new json
	newData, err := json.Marshal(req)
	if err != nil {
		return nil, errors.Wrap(err, "error marshalling updated PodVolumeRestore")
	}

	patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData)
	if err != nil {
		return nil, errors.Wrap(err, "error creating json merge patch for PodVolumeRestore")
	}

	req, err = c.podVolumeRestoreClient.PodVolumeRestores(req.Namespace).Patch(req.Name, types.MergePatchType, patchBytes)
	if err != nil {
		return nil, errors.Wrap(err, "error patching PodVolumeRestore")
	}

	return req, nil
}
// fail marks req as Failed with the given message. It returns nil when the
// patch succeeds (so callers can `return c.fail(...)` without requeueing),
// or the patch error otherwise.
func (c *podVolumeRestoreController) fail(req *arkv1api.PodVolumeRestore, msg string, log logrus.FieldLogger) error {
	setFailed := func(pvr *arkv1api.PodVolumeRestore) {
		pvr.Status.Phase = arkv1api.PodVolumeRestorePhaseFailed
		pvr.Status.Message = msg
	}

	_, err := c.patchPodVolumeRestore(req, setFailed)
	if err != nil {
		log.WithError(err).Error("Error setting phase to Failed")
	}
	return err
}
// updatePodVolumeRestorePhaseFunc returns a mutator, suitable for
// patchPodVolumeRestore, that sets a PodVolumeRestore's status phase.
func updatePodVolumeRestorePhaseFunc(phase arkv1api.PodVolumeRestorePhase) func(r *arkv1api.PodVolumeRestore) {
	return func(pvr *arkv1api.PodVolumeRestore) {
		pvr.Status.Phase = phase
	}
}

View File

@ -31,6 +31,8 @@ type ArkV1Interface interface {
ConfigsGetter
DeleteBackupRequestsGetter
DownloadRequestsGetter
PodVolumeBackupsGetter
PodVolumeRestoresGetter
RestoresGetter
SchedulesGetter
}
@ -56,6 +58,14 @@ func (c *ArkV1Client) DownloadRequests(namespace string) DownloadRequestInterfac
return newDownloadRequests(c, namespace)
}
// PodVolumeBackups returns a client for PodVolumeBackup resources scoped to the given namespace.
func (c *ArkV1Client) PodVolumeBackups(namespace string) PodVolumeBackupInterface {
	return newPodVolumeBackups(c, namespace)
}
// PodVolumeRestores returns a client for PodVolumeRestore resources scoped to the given namespace.
func (c *ArkV1Client) PodVolumeRestores(namespace string) PodVolumeRestoreInterface {
	return newPodVolumeRestores(c, namespace)
}
// Restores returns a client for Restore resources scoped to the given namespace.
func (c *ArkV1Client) Restores(namespace string) RestoreInterface {
	return newRestores(c, namespace)
}

View File

@ -44,6 +44,14 @@ func (c *FakeArkV1) DownloadRequests(namespace string) v1.DownloadRequestInterfa
return &FakeDownloadRequests{c, namespace}
}
// PodVolumeBackups returns a fake PodVolumeBackup client scoped to the given namespace.
func (c *FakeArkV1) PodVolumeBackups(namespace string) v1.PodVolumeBackupInterface {
	return &FakePodVolumeBackups{c, namespace}
}
// PodVolumeRestores returns a fake PodVolumeRestore client scoped to the given namespace.
func (c *FakeArkV1) PodVolumeRestores(namespace string) v1.PodVolumeRestoreInterface {
	return &FakePodVolumeRestores{c, namespace}
}
// Restores returns a fake Restore client scoped to the given namespace.
func (c *FakeArkV1) Restores(namespace string) v1.RestoreInterface {
	return &FakeRestores{c, namespace}
}

View File

@ -0,0 +1,140 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakePodVolumeBackups implements PodVolumeBackupInterface
type FakePodVolumeBackups struct {
	Fake *FakeArkV1 // parent fake clientset; records all actions for test inspection
	ns   string     // namespace all requests are scoped to
}

// Resource and kind identifiers used when recording actions against the fake tracker.
var podvolumebackupsResource = schema.GroupVersionResource{Group: "ark.heptio.com", Version: "v1", Resource: "podvolumebackups"}

var podvolumebackupsKind = schema.GroupVersionKind{Group: "ark.heptio.com", Version: "v1", Kind: "PodVolumeBackup"}

// Get takes name of the podVolumeBackup, and returns the corresponding podVolumeBackup object, and an error if there is any.
func (c *FakePodVolumeBackups) Get(name string, options v1.GetOptions) (result *ark_v1.PodVolumeBackup, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewGetAction(podvolumebackupsResource, c.ns, name), &ark_v1.PodVolumeBackup{})
	if obj == nil {
		return nil, err
	}
	return obj.(*ark_v1.PodVolumeBackup), err
}

// List takes label and field selectors, and returns the list of PodVolumeBackups that match those selectors.
func (c *FakePodVolumeBackups) List(opts v1.ListOptions) (result *ark_v1.PodVolumeBackupList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewListAction(podvolumebackupsResource, podvolumebackupsKind, c.ns, opts), &ark_v1.PodVolumeBackupList{})
	if obj == nil {
		return nil, err
	}
	// Only the label selector is honored; the field selector is discarded here.
	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	list := &ark_v1.PodVolumeBackupList{}
	for _, item := range obj.(*ark_v1.PodVolumeBackupList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
}

// Watch returns a watch.Interface that watches the requested podVolumeBackups.
func (c *FakePodVolumeBackups) Watch(opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.
		InvokesWatch(testing.NewWatchAction(podvolumebackupsResource, c.ns, opts))
}

// Create takes the representation of a podVolumeBackup and creates it. Returns the server's representation of the podVolumeBackup, and an error, if there is any.
func (c *FakePodVolumeBackups) Create(podVolumeBackup *ark_v1.PodVolumeBackup) (result *ark_v1.PodVolumeBackup, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewCreateAction(podvolumebackupsResource, c.ns, podVolumeBackup), &ark_v1.PodVolumeBackup{})
	if obj == nil {
		return nil, err
	}
	return obj.(*ark_v1.PodVolumeBackup), err
}

// Update takes the representation of a podVolumeBackup and updates it. Returns the server's representation of the podVolumeBackup, and an error, if there is any.
func (c *FakePodVolumeBackups) Update(podVolumeBackup *ark_v1.PodVolumeBackup) (result *ark_v1.PodVolumeBackup, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateAction(podvolumebackupsResource, c.ns, podVolumeBackup), &ark_v1.PodVolumeBackup{})
	if obj == nil {
		return nil, err
	}
	return obj.(*ark_v1.PodVolumeBackup), err
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakePodVolumeBackups) UpdateStatus(podVolumeBackup *ark_v1.PodVolumeBackup) (*ark_v1.PodVolumeBackup, error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateSubresourceAction(podvolumebackupsResource, "status", c.ns, podVolumeBackup), &ark_v1.PodVolumeBackup{})
	if obj == nil {
		return nil, err
	}
	return obj.(*ark_v1.PodVolumeBackup), err
}

// Delete takes name of the podVolumeBackup and deletes it. Returns an error if one occurs.
func (c *FakePodVolumeBackups) Delete(name string, options *v1.DeleteOptions) error {
	_, err := c.Fake.
		Invokes(testing.NewDeleteAction(podvolumebackupsResource, c.ns, name), &ark_v1.PodVolumeBackup{})
	return err
}

// DeleteCollection deletes a collection of objects.
func (c *FakePodVolumeBackups) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
	action := testing.NewDeleteCollectionAction(podvolumebackupsResource, c.ns, listOptions)

	_, err := c.Fake.Invokes(action, &ark_v1.PodVolumeBackupList{})
	return err
}

// Patch applies the patch and returns the patched podVolumeBackup.
func (c *FakePodVolumeBackups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *ark_v1.PodVolumeBackup, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewPatchSubresourceAction(podvolumebackupsResource, c.ns, name, data, subresources...), &ark_v1.PodVolumeBackup{})
	if obj == nil {
		return nil, err
	}
	return obj.(*ark_v1.PodVolumeBackup), err
}

View File

@ -0,0 +1,140 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakePodVolumeRestores implements PodVolumeRestoreInterface
type FakePodVolumeRestores struct {
	Fake *FakeArkV1 // parent fake clientset; records all actions for test inspection
	ns   string     // namespace all requests are scoped to
}

// Resource and kind identifiers used when recording actions against the fake tracker.
var podvolumerestoresResource = schema.GroupVersionResource{Group: "ark.heptio.com", Version: "v1", Resource: "podvolumerestores"}

var podvolumerestoresKind = schema.GroupVersionKind{Group: "ark.heptio.com", Version: "v1", Kind: "PodVolumeRestore"}

// Get takes name of the podVolumeRestore, and returns the corresponding podVolumeRestore object, and an error if there is any.
func (c *FakePodVolumeRestores) Get(name string, options v1.GetOptions) (result *ark_v1.PodVolumeRestore, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewGetAction(podvolumerestoresResource, c.ns, name), &ark_v1.PodVolumeRestore{})
	if obj == nil {
		return nil, err
	}
	return obj.(*ark_v1.PodVolumeRestore), err
}

// List takes label and field selectors, and returns the list of PodVolumeRestores that match those selectors.
func (c *FakePodVolumeRestores) List(opts v1.ListOptions) (result *ark_v1.PodVolumeRestoreList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewListAction(podvolumerestoresResource, podvolumerestoresKind, c.ns, opts), &ark_v1.PodVolumeRestoreList{})
	if obj == nil {
		return nil, err
	}
	// Only the label selector is honored; the field selector is discarded here.
	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	list := &ark_v1.PodVolumeRestoreList{}
	for _, item := range obj.(*ark_v1.PodVolumeRestoreList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
}

// Watch returns a watch.Interface that watches the requested podVolumeRestores.
func (c *FakePodVolumeRestores) Watch(opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.
		InvokesWatch(testing.NewWatchAction(podvolumerestoresResource, c.ns, opts))
}

// Create takes the representation of a podVolumeRestore and creates it. Returns the server's representation of the podVolumeRestore, and an error, if there is any.
func (c *FakePodVolumeRestores) Create(podVolumeRestore *ark_v1.PodVolumeRestore) (result *ark_v1.PodVolumeRestore, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewCreateAction(podvolumerestoresResource, c.ns, podVolumeRestore), &ark_v1.PodVolumeRestore{})
	if obj == nil {
		return nil, err
	}
	return obj.(*ark_v1.PodVolumeRestore), err
}

// Update takes the representation of a podVolumeRestore and updates it. Returns the server's representation of the podVolumeRestore, and an error, if there is any.
func (c *FakePodVolumeRestores) Update(podVolumeRestore *ark_v1.PodVolumeRestore) (result *ark_v1.PodVolumeRestore, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateAction(podvolumerestoresResource, c.ns, podVolumeRestore), &ark_v1.PodVolumeRestore{})
	if obj == nil {
		return nil, err
	}
	return obj.(*ark_v1.PodVolumeRestore), err
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakePodVolumeRestores) UpdateStatus(podVolumeRestore *ark_v1.PodVolumeRestore) (*ark_v1.PodVolumeRestore, error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateSubresourceAction(podvolumerestoresResource, "status", c.ns, podVolumeRestore), &ark_v1.PodVolumeRestore{})
	if obj == nil {
		return nil, err
	}
	return obj.(*ark_v1.PodVolumeRestore), err
}

// Delete takes name of the podVolumeRestore and deletes it. Returns an error if one occurs.
func (c *FakePodVolumeRestores) Delete(name string, options *v1.DeleteOptions) error {
	_, err := c.Fake.
		Invokes(testing.NewDeleteAction(podvolumerestoresResource, c.ns, name), &ark_v1.PodVolumeRestore{})
	return err
}

// DeleteCollection deletes a collection of objects.
func (c *FakePodVolumeRestores) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
	action := testing.NewDeleteCollectionAction(podvolumerestoresResource, c.ns, listOptions)

	_, err := c.Fake.Invokes(action, &ark_v1.PodVolumeRestoreList{})
	return err
}

// Patch applies the patch and returns the patched podVolumeRestore.
func (c *FakePodVolumeRestores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *ark_v1.PodVolumeRestore, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewPatchSubresourceAction(podvolumerestoresResource, c.ns, name, data, subresources...), &ark_v1.PodVolumeRestore{})
	if obj == nil {
		return nil, err
	}
	return obj.(*ark_v1.PodVolumeRestore), err
}

View File

@ -26,6 +26,10 @@ type DeleteBackupRequestExpansion interface{}
type DownloadRequestExpansion interface{}
type PodVolumeBackupExpansion interface{}
type PodVolumeRestoreExpansion interface{}
type RestoreExpansion interface{}
type ScheduleExpansion interface{}

View File

@ -0,0 +1,174 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// PodVolumeBackupsGetter has a method to return a PodVolumeBackupInterface.
// A group's client should implement this interface.
type PodVolumeBackupsGetter interface {
	PodVolumeBackups(namespace string) PodVolumeBackupInterface
}

// PodVolumeBackupInterface has methods to work with PodVolumeBackup resources.
type PodVolumeBackupInterface interface {
	Create(*v1.PodVolumeBackup) (*v1.PodVolumeBackup, error)
	Update(*v1.PodVolumeBackup) (*v1.PodVolumeBackup, error)
	UpdateStatus(*v1.PodVolumeBackup) (*v1.PodVolumeBackup, error)
	Delete(name string, options *meta_v1.DeleteOptions) error
	DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error
	Get(name string, options meta_v1.GetOptions) (*v1.PodVolumeBackup, error)
	List(opts meta_v1.ListOptions) (*v1.PodVolumeBackupList, error)
	Watch(opts meta_v1.ListOptions) (watch.Interface, error)
	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodVolumeBackup, err error)
	PodVolumeBackupExpansion
}

// podVolumeBackups implements PodVolumeBackupInterface
type podVolumeBackups struct {
	client rest.Interface // REST client for the ark.heptio.com/v1 API group
	ns     string         // namespace all requests are scoped to
}

// newPodVolumeBackups returns a PodVolumeBackups
func newPodVolumeBackups(c *ArkV1Client, namespace string) *podVolumeBackups {
	return &podVolumeBackups{
		client: c.RESTClient(),
		ns:     namespace,
	}
}

// Get takes name of the podVolumeBackup, and returns the corresponding podVolumeBackup object, and an error if there is any.
func (c *podVolumeBackups) Get(name string, options meta_v1.GetOptions) (result *v1.PodVolumeBackup, err error) {
	result = &v1.PodVolumeBackup{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("podvolumebackups").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// List takes label and field selectors, and returns the list of PodVolumeBackups that match those selectors.
func (c *podVolumeBackups) List(opts meta_v1.ListOptions) (result *v1.PodVolumeBackupList, err error) {
	result = &v1.PodVolumeBackupList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("podvolumebackups").
		VersionedParams(&opts, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// Watch returns a watch.Interface that watches the requested podVolumeBackups.
func (c *podVolumeBackups) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("podvolumebackups").
		VersionedParams(&opts, scheme.ParameterCodec).
		Watch()
}

// Create takes the representation of a podVolumeBackup and creates it. Returns the server's representation of the podVolumeBackup, and an error, if there is any.
func (c *podVolumeBackups) Create(podVolumeBackup *v1.PodVolumeBackup) (result *v1.PodVolumeBackup, err error) {
	result = &v1.PodVolumeBackup{}
	err = c.client.Post().
		Namespace(c.ns).
		Resource("podvolumebackups").
		Body(podVolumeBackup).
		Do().
		Into(result)
	return
}

// Update takes the representation of a podVolumeBackup and updates it. Returns the server's representation of the podVolumeBackup, and an error, if there is any.
func (c *podVolumeBackups) Update(podVolumeBackup *v1.PodVolumeBackup) (result *v1.PodVolumeBackup, err error) {
	result = &v1.PodVolumeBackup{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("podvolumebackups").
		Name(podVolumeBackup.Name).
		Body(podVolumeBackup).
		Do().
		Into(result)
	return
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *podVolumeBackups) UpdateStatus(podVolumeBackup *v1.PodVolumeBackup) (result *v1.PodVolumeBackup, err error) {
	result = &v1.PodVolumeBackup{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("podvolumebackups").
		Name(podVolumeBackup.Name).
		SubResource("status").
		Body(podVolumeBackup).
		Do().
		Into(result)
	return
}

// Delete takes name of the podVolumeBackup and deletes it. Returns an error if one occurs.
func (c *podVolumeBackups) Delete(name string, options *meta_v1.DeleteOptions) error {
	return c.client.Delete().
		Namespace(c.ns).
		Resource("podvolumebackups").
		Name(name).
		Body(options).
		Do().
		Error()
}

// DeleteCollection deletes a collection of objects.
func (c *podVolumeBackups) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
	return c.client.Delete().
		Namespace(c.ns).
		Resource("podvolumebackups").
		VersionedParams(&listOptions, scheme.ParameterCodec).
		Body(options).
		Do().
		Error()
}

// Patch applies the patch and returns the patched podVolumeBackup.
func (c *podVolumeBackups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodVolumeBackup, err error) {
	result = &v1.PodVolumeBackup{}
	err = c.client.Patch(pt).
		Namespace(c.ns).
		Resource("podvolumebackups").
		SubResource(subresources...).
		Name(name).
		Body(data).
		Do().
		Into(result)
	return
}

View File

@ -0,0 +1,174 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// PodVolumeRestoresGetter has a method to return a PodVolumeRestoreInterface.
// A group's client should implement this interface.
type PodVolumeRestoresGetter interface {
	PodVolumeRestores(namespace string) PodVolumeRestoreInterface
}

// PodVolumeRestoreInterface has methods to work with PodVolumeRestore resources.
type PodVolumeRestoreInterface interface {
	Create(*v1.PodVolumeRestore) (*v1.PodVolumeRestore, error)
	Update(*v1.PodVolumeRestore) (*v1.PodVolumeRestore, error)
	UpdateStatus(*v1.PodVolumeRestore) (*v1.PodVolumeRestore, error)
	Delete(name string, options *meta_v1.DeleteOptions) error
	DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error
	Get(name string, options meta_v1.GetOptions) (*v1.PodVolumeRestore, error)
	List(opts meta_v1.ListOptions) (*v1.PodVolumeRestoreList, error)
	Watch(opts meta_v1.ListOptions) (watch.Interface, error)
	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodVolumeRestore, err error)
	PodVolumeRestoreExpansion
}

// podVolumeRestores implements PodVolumeRestoreInterface
type podVolumeRestores struct {
	client rest.Interface // REST client for the ark.heptio.com/v1 API group
	ns     string         // namespace all requests are scoped to
}

// newPodVolumeRestores returns a PodVolumeRestores
func newPodVolumeRestores(c *ArkV1Client, namespace string) *podVolumeRestores {
	return &podVolumeRestores{
		client: c.RESTClient(),
		ns:     namespace,
	}
}

// Get takes name of the podVolumeRestore, and returns the corresponding podVolumeRestore object, and an error if there is any.
func (c *podVolumeRestores) Get(name string, options meta_v1.GetOptions) (result *v1.PodVolumeRestore, err error) {
	result = &v1.PodVolumeRestore{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("podvolumerestores").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// List takes label and field selectors, and returns the list of PodVolumeRestores that match those selectors.
func (c *podVolumeRestores) List(opts meta_v1.ListOptions) (result *v1.PodVolumeRestoreList, err error) {
	result = &v1.PodVolumeRestoreList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("podvolumerestores").
		VersionedParams(&opts, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// Watch returns a watch.Interface that watches the requested podVolumeRestores.
func (c *podVolumeRestores) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("podvolumerestores").
		VersionedParams(&opts, scheme.ParameterCodec).
		Watch()
}

// Create takes the representation of a podVolumeRestore and creates it. Returns the server's representation of the podVolumeRestore, and an error, if there is any.
func (c *podVolumeRestores) Create(podVolumeRestore *v1.PodVolumeRestore) (result *v1.PodVolumeRestore, err error) {
	result = &v1.PodVolumeRestore{}
	err = c.client.Post().
		Namespace(c.ns).
		Resource("podvolumerestores").
		Body(podVolumeRestore).
		Do().
		Into(result)
	return
}

// Update takes the representation of a podVolumeRestore and updates it. Returns the server's representation of the podVolumeRestore, and an error, if there is any.
func (c *podVolumeRestores) Update(podVolumeRestore *v1.PodVolumeRestore) (result *v1.PodVolumeRestore, err error) {
	result = &v1.PodVolumeRestore{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("podvolumerestores").
		Name(podVolumeRestore.Name).
		Body(podVolumeRestore).
		Do().
		Into(result)
	return
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *podVolumeRestores) UpdateStatus(podVolumeRestore *v1.PodVolumeRestore) (result *v1.PodVolumeRestore, err error) {
	result = &v1.PodVolumeRestore{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("podvolumerestores").
		Name(podVolumeRestore.Name).
		SubResource("status").
		Body(podVolumeRestore).
		Do().
		Into(result)
	return
}

// Delete takes name of the podVolumeRestore and deletes it. Returns an error if one occurs.
func (c *podVolumeRestores) Delete(name string, options *meta_v1.DeleteOptions) error {
	return c.client.Delete().
		Namespace(c.ns).
		Resource("podvolumerestores").
		Name(name).
		Body(options).
		Do().
		Error()
}

// DeleteCollection deletes a collection of objects.
func (c *podVolumeRestores) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
	return c.client.Delete().
		Namespace(c.ns).
		Resource("podvolumerestores").
		VersionedParams(&listOptions, scheme.ParameterCodec).
		Body(options).
		Do().
		Error()
}

// Patch applies the patch and returns the patched podVolumeRestore.
func (c *podVolumeRestores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodVolumeRestore, err error) {
	result = &v1.PodVolumeRestore{}
	err = c.client.Patch(pt).
		Namespace(c.ns).
		Resource("podvolumerestores").
		SubResource(subresources...).
		Name(name).
		Body(data).
		Do().
		Into(result)
	return
}

View File

@ -32,6 +32,10 @@ type Interface interface {
DeleteBackupRequests() DeleteBackupRequestInformer
// DownloadRequests returns a DownloadRequestInformer.
DownloadRequests() DownloadRequestInformer
// PodVolumeBackups returns a PodVolumeBackupInformer.
PodVolumeBackups() PodVolumeBackupInformer
// PodVolumeRestores returns a PodVolumeRestoreInformer.
PodVolumeRestores() PodVolumeRestoreInformer
// Restores returns a RestoreInformer.
Restores() RestoreInformer
// Schedules returns a ScheduleInformer.
@ -69,6 +73,16 @@ func (v *version) DownloadRequests() DownloadRequestInformer {
return &downloadRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// PodVolumeBackups returns a PodVolumeBackupInformer.
// The informer inherits the factory's namespace and list-option tweaks.
func (v *version) PodVolumeBackups() PodVolumeBackupInformer {
	return &podVolumeBackupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// PodVolumeRestores returns a PodVolumeRestoreInformer.
// The informer inherits the factory's namespace and list-option tweaks.
func (v *version) PodVolumeRestores() PodVolumeRestoreInformer {
	return &podVolumeRestoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// Restores returns a RestoreInformer.
func (v *version) Restores() RestoreInformer {
return &restoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}

View File

@ -0,0 +1,89 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
time "time"
ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1"
versioned "github.com/heptio/ark/pkg/generated/clientset/versioned"
internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces"
v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// PodVolumeBackupInformer provides access to a shared informer and lister for
// PodVolumeBackups.
type PodVolumeBackupInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1.PodVolumeBackupLister
}

// podVolumeBackupInformer implements PodVolumeBackupInformer on top of a
// shared informer factory.
type podVolumeBackupInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewPodVolumeBackupInformer constructs a new informer for PodVolumeBackup type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewPodVolumeBackupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredPodVolumeBackupInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredPodVolumeBackupInformer constructs a new informer for PodVolumeBackup type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPodVolumeBackupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.ArkV1().PodVolumeBackups(namespace).List(options)
			},
			WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.ArkV1().PodVolumeBackups(namespace).Watch(options)
			},
		},
		&ark_v1.PodVolumeBackup{},
		resyncPeriod,
		indexers,
	)
}

// defaultInformer builds the namespace-indexed informer that the shared
// factory caches for this type.
func (f *podVolumeBackupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredPodVolumeBackupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

// Informer returns the shared informer for PodVolumeBackups, creating it via
// the factory on first use.
func (f *podVolumeBackupInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&ark_v1.PodVolumeBackup{}, f.defaultInformer)
}

// Lister returns a lister backed by the shared informer's index.
func (f *podVolumeBackupInformer) Lister() v1.PodVolumeBackupLister {
	return v1.NewPodVolumeBackupLister(f.Informer().GetIndexer())
}

View File

@ -0,0 +1,89 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
time "time"
ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1"
versioned "github.com/heptio/ark/pkg/generated/clientset/versioned"
internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces"
v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// PodVolumeRestoreInformer provides access to a shared informer and lister for
// PodVolumeRestores.
type PodVolumeRestoreInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1.PodVolumeRestoreLister
}

// podVolumeRestoreInformer implements PodVolumeRestoreInformer on top of a
// shared informer factory.
type podVolumeRestoreInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewPodVolumeRestoreInformer constructs a new informer for PodVolumeRestore type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewPodVolumeRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredPodVolumeRestoreInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredPodVolumeRestoreInformer constructs a new informer for PodVolumeRestore type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPodVolumeRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.ArkV1().PodVolumeRestores(namespace).List(options)
			},
			WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.ArkV1().PodVolumeRestores(namespace).Watch(options)
			},
		},
		&ark_v1.PodVolumeRestore{},
		resyncPeriod,
		indexers,
	)
}

// defaultInformer builds the namespace-indexed informer that the shared
// factory caches for this type.
func (f *podVolumeRestoreInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredPodVolumeRestoreInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

// Informer returns the shared informer for PodVolumeRestores, creating it via
// the factory on first use.
func (f *podVolumeRestoreInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&ark_v1.PodVolumeRestore{}, f.defaultInformer)
}

// Lister returns a lister backed by the shared informer's index.
func (f *podVolumeRestoreInformer) Lister() v1.PodVolumeRestoreLister {
	return v1.NewPodVolumeRestoreLister(f.Informer().GetIndexer())
}

View File

@ -61,6 +61,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Ark().V1().DeleteBackupRequests().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("downloadrequests"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Ark().V1().DownloadRequests().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("podvolumebackups"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Ark().V1().PodVolumeBackups().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("podvolumerestores"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Ark().V1().PodVolumeRestores().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("restores"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Ark().V1().Restores().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("schedules"):

View File

@ -50,6 +50,22 @@ type DownloadRequestListerExpansion interface{}
// DownloadRequestNamespaceLister.
type DownloadRequestNamespaceListerExpansion interface{}
// PodVolumeBackupListerExpansion allows custom methods to be added to
// PodVolumeBackupLister.
type PodVolumeBackupListerExpansion interface{}
// PodVolumeBackupNamespaceListerExpansion allows custom methods to be added to
// PodVolumeBackupNamespaceLister.
type PodVolumeBackupNamespaceListerExpansion interface{}
// PodVolumeRestoreListerExpansion allows custom methods to be added to
// PodVolumeRestoreLister.
type PodVolumeRestoreListerExpansion interface{}
// PodVolumeRestoreNamespaceListerExpansion allows custom methods to be added to
// PodVolumeRestoreNamespaceLister.
type PodVolumeRestoreNamespaceListerExpansion interface{}
// RestoreListerExpansion allows custom methods to be added to
// RestoreLister.
type RestoreListerExpansion interface{}

View File

@ -0,0 +1,94 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// PodVolumeBackupLister helps list PodVolumeBackups.
type PodVolumeBackupLister interface {
	// List lists all PodVolumeBackups in the indexer.
	List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error)
	// PodVolumeBackups returns an object that can list and get PodVolumeBackups.
	PodVolumeBackups(namespace string) PodVolumeBackupNamespaceLister
	PodVolumeBackupListerExpansion
}

// podVolumeBackupLister implements the PodVolumeBackupLister interface.
type podVolumeBackupLister struct {
	indexer cache.Indexer // shared-informer index holding PodVolumeBackup objects
}

// NewPodVolumeBackupLister returns a new PodVolumeBackupLister.
func NewPodVolumeBackupLister(indexer cache.Indexer) PodVolumeBackupLister {
	return &podVolumeBackupLister{indexer: indexer}
}

// List lists all PodVolumeBackups in the indexer matching the selector.
func (s *podVolumeBackupLister) List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*v1.PodVolumeBackup))
	})
	return ret, err
}

// PodVolumeBackups returns an object that can list and get PodVolumeBackups
// scoped to the given namespace.
func (s *podVolumeBackupLister) PodVolumeBackups(namespace string) PodVolumeBackupNamespaceLister {
	return podVolumeBackupNamespaceLister{indexer: s.indexer, namespace: namespace}
}

// PodVolumeBackupNamespaceLister helps list and get PodVolumeBackups.
type PodVolumeBackupNamespaceLister interface {
	// List lists all PodVolumeBackups in the indexer for a given namespace.
	List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error)
	// Get retrieves the PodVolumeBackup from the indexer for a given namespace and name.
	Get(name string) (*v1.PodVolumeBackup, error)
	PodVolumeBackupNamespaceListerExpansion
}

// podVolumeBackupNamespaceLister implements the PodVolumeBackupNamespaceLister
// interface.
type podVolumeBackupNamespaceLister struct {
	indexer   cache.Indexer
	namespace string // namespace this lister is scoped to
}

// List lists all PodVolumeBackups in the indexer for a given namespace.
func (s podVolumeBackupNamespaceLister) List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error) {
	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(*v1.PodVolumeBackup))
	})
	return ret, err
}

// Get retrieves the PodVolumeBackup from the indexer for a given namespace and
// name, returning an API NotFound error when the object is not in the index.
func (s podVolumeBackupNamespaceLister) Get(name string) (*v1.PodVolumeBackup, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v1.Resource("podvolumebackup"), name)
	}
	return obj.(*v1.PodVolumeBackup), nil
}

View File

@ -0,0 +1,94 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// PodVolumeRestoreLister helps list PodVolumeRestores.
type PodVolumeRestoreLister interface {
	// List lists all PodVolumeRestores in the indexer.
	List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error)
	// PodVolumeRestores returns an object that can list and get PodVolumeRestores.
	PodVolumeRestores(namespace string) PodVolumeRestoreNamespaceLister
	PodVolumeRestoreListerExpansion
}

// podVolumeRestoreLister implements the PodVolumeRestoreLister interface.
type podVolumeRestoreLister struct {
	indexer cache.Indexer // shared-informer index holding PodVolumeRestore objects
}

// NewPodVolumeRestoreLister returns a new PodVolumeRestoreLister.
func NewPodVolumeRestoreLister(indexer cache.Indexer) PodVolumeRestoreLister {
	return &podVolumeRestoreLister{indexer: indexer}
}

// List lists all PodVolumeRestores in the indexer matching the selector.
func (s *podVolumeRestoreLister) List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*v1.PodVolumeRestore))
	})
	return ret, err
}

// PodVolumeRestores returns an object that can list and get PodVolumeRestores
// scoped to the given namespace.
func (s *podVolumeRestoreLister) PodVolumeRestores(namespace string) PodVolumeRestoreNamespaceLister {
	return podVolumeRestoreNamespaceLister{indexer: s.indexer, namespace: namespace}
}

// PodVolumeRestoreNamespaceLister helps list and get PodVolumeRestores.
type PodVolumeRestoreNamespaceLister interface {
	// List lists all PodVolumeRestores in the indexer for a given namespace.
	List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error)
	// Get retrieves the PodVolumeRestore from the indexer for a given namespace and name.
	Get(name string) (*v1.PodVolumeRestore, error)
	PodVolumeRestoreNamespaceListerExpansion
}

// podVolumeRestoreNamespaceLister implements the PodVolumeRestoreNamespaceLister
// interface.
type podVolumeRestoreNamespaceLister struct {
	indexer   cache.Indexer
	namespace string // namespace this lister is scoped to
}

// List lists all PodVolumeRestores in the indexer for a given namespace.
func (s podVolumeRestoreNamespaceLister) List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error) {
	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(*v1.PodVolumeRestore))
	})
	return ret, err
}

// Get retrieves the PodVolumeRestore from the indexer for a given namespace and
// name, returning an API NotFound error when the object is not in the index.
func (s podVolumeRestoreNamespaceLister) Get(name string) (*v1.PodVolumeRestore, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v1.Resource("podvolumerestore"), name)
	}
	return obj.(*v1.PodVolumeRestore), nil
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (
@ -11,9 +27,10 @@ import (
type arkConfigOption func(*arkConfig)
type arkConfig struct {
backupSyncPeriod time.Duration
gcSyncPeriod time.Duration
restoreOnly bool
backupSyncPeriod time.Duration
gcSyncPeriod time.Duration
podVolumeOperationTimeout time.Duration
restoreOnly bool
}
func WithBackupSyncPeriod(t time.Duration) arkConfigOption {
@ -28,6 +45,12 @@ func WithGCSyncPeriod(t time.Duration) arkConfigOption {
}
}
func WithPodVolumeOperationTimeout(t time.Duration) arkConfigOption {
return func(c *arkConfig) {
c.podVolumeOperationTimeout = t
}
}
func WithRestoreOnly() arkConfigOption {
return func(c *arkConfig) {
c.restoreOnly = true
@ -44,8 +67,9 @@ func Config(
opts ...arkConfigOption,
) *arkv1.Config {
c := &arkConfig{
backupSyncPeriod: 30 * time.Minute,
gcSyncPeriod: 30 * time.Minute,
backupSyncPeriod: 30 * time.Minute,
gcSyncPeriod: 30 * time.Minute,
podVolumeOperationTimeout: 60 * time.Minute,
}
for _, opt := range opts {
@ -74,6 +98,9 @@ func Config(
ScheduleSyncPeriod: metav1.Duration{
Duration: time.Minute,
},
PodVolumeOperationTimeout: metav1.Duration{
Duration: c.podVolumeOperationTimeout,
},
RestoreOnlyMode: c.restoreOnly,
}
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (
@ -18,6 +34,8 @@ func CRDs() []*apiextv1beta1.CustomResourceDefinition {
crd("Config", "configs"),
crd("DownloadRequest", "downloadrequests"),
crd("DeleteBackupRequest", "deletebackuprequests"),
crd("PodVolumeBackup", "podvolumebackups"),
crd("PodVolumeRestore", "podvolumerestores"),
}
}

130
pkg/install/daemonset.go Normal file
View File

@ -0,0 +1,130 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// DaemonSet returns the appsv1.DaemonSet that runs the restic daemon on every
// node, applying the given podTemplateOptions (image, credentials volume,
// extra env vars) on top of the defaults.
func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet {
	cfg := &podTemplateConfig{
		image: "gcr.io/heptio-images/ark:latest",
	}
	for _, opt := range opts {
		opt(cfg)
	}

	// Always re-pull "latest" (or untagged) images; pinned tags may be cached.
	pullPolicy := corev1.PullAlways
	if imageParts := strings.Split(cfg.image, ":"); len(imageParts) == 2 && imageParts[1] != "latest" {
		pullPolicy = corev1.PullIfNotPresent
	}

	env := []corev1.EnvVar{
		{
			Name: "NODE_NAME",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					FieldPath: "spec.nodeName",
				},
			},
		},
		{
			Name: "HEPTIO_ARK_NAMESPACE",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					FieldPath: "metadata.namespace",
				},
			},
		},
		{
			Name:  "GOOGLE_APPLICATION_CREDENTIALS",
			Value: "/credentials/cloud",
		},
		{
			Name:  "AWS_SHARED_CREDENTIALS_FILE",
			Value: "/credentials/cloud",
		},
	}
	// Caller-supplied env vars are appended after the defaults.
	env = append(env, cfg.envVars...)

	podSpec := corev1.PodSpec{
		ServiceAccountName: "ark",
		// host-pods exposes the kubelet's pod volume directories so the restic
		// daemon can read volume data directly from the host.
		Volumes: []corev1.Volume{
			{
				Name: "host-pods",
				VolumeSource: corev1.VolumeSource{
					HostPath: &corev1.HostPathVolumeSource{
						Path: "/var/lib/kubelet/pods",
					},
				},
			},
		},
		Containers: []corev1.Container{
			{
				Name:            "restic",
				Image:           cfg.image,
				ImagePullPolicy: pullPolicy,
				VolumeMounts: []corev1.VolumeMount{
					{
						Name:      "host-pods",
						MountPath: "/host_pods",
					},
				},
				Env: env,
			},
		},
	}

	if !cfg.withoutCredentialsVolume {
		// NOTE(review): the cloud-credentials volume is added here but no
		// corresponding VolumeMount is attached to the container, even though
		// the credentials env vars point at /credentials/cloud — confirm this
		// is intentional.
		podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
			Name: "cloud-credentials",
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName: "cloud-credentials",
				},
			},
		})
	}

	return &appsv1.DaemonSet{
		ObjectMeta: objectMeta(namespace, "restic"),
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"name": "restic",
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"name": "restic",
					},
				},
				Spec: podSpec,
			},
		},
	}
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (
@ -8,27 +24,44 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type deploymentOption func(*deploymentConfig)
type podTemplateOption func(*podTemplateConfig)
type deploymentConfig struct {
type podTemplateConfig struct {
image string
withoutCredentialsVolume bool
envVars []corev1.EnvVar
}
func WithImage(image string) deploymentOption {
return func(c *deploymentConfig) {
func WithImage(image string) podTemplateOption {
return func(c *podTemplateConfig) {
c.image = image
}
}
func WithoutCredentialsVolume() deploymentOption {
return func(c *deploymentConfig) {
func WithoutCredentialsVolume() podTemplateOption {
return func(c *podTemplateConfig) {
c.withoutCredentialsVolume = true
}
}
func Deployment(namespace string, opts ...deploymentOption) *appsv1beta1.Deployment {
c := &deploymentConfig{
func WithEnvFromSecretKey(varName, secret, key string) podTemplateOption {
return func(c *podTemplateConfig) {
c.envVars = append(c.envVars, corev1.EnvVar{
Name: varName,
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: secret,
},
Key: key,
},
},
})
}
}
func Deployment(namespace string, opts ...podTemplateOption) *appsv1beta1.Deployment {
c := &podTemplateConfig{
image: "gcr.io/heptio-images/ark:latest",
}

View File

@ -1,3 +1,19 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package install
import (

View File

@ -193,6 +193,7 @@ func (m *manager) registerPlugins() error {
m.pluginRegistry.register("job", arkCommand, []string{"run-plugin", string(PluginKindRestoreItemAction), "job"}, PluginKindRestoreItemAction)
m.pluginRegistry.register("restore-pod", arkCommand, []string{"run-plugin", string(PluginKindRestoreItemAction), "pod"}, PluginKindRestoreItemAction)
m.pluginRegistry.register("svc", arkCommand, []string{"run-plugin", string(PluginKindRestoreItemAction), "svc"}, PluginKindRestoreItemAction)
m.pluginRegistry.register("restic", arkCommand, []string{"run-plugin", string(PluginKindRestoreItemAction), "restic"}, PluginKindRestoreItemAction)
// second, register external plugins (these will override internal plugins, if applicable)
if _, err := os.Stat(m.pluginDir); err != nil {

View File

@ -14,28 +14,32 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
package podexec
import (
"bytes"
"net/url"
"time"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/util/collections"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
kapiv1 "k8s.io/api/core/v1"
kscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/util/collections"
)
// podCommandExecutor is capable of executing a command in a container in a pod.
type podCommandExecutor interface {
// executePodCommand executes a command in a container in a pod. If the command takes longer than
const defaultTimeout = 30 * time.Second
// PodCommandExecutor is capable of executing a command in a container in a pod.
type PodCommandExecutor interface {
// ExecutePodCommand executes a command in a container in a pod. If the command takes longer than
// the specified timeout, an error is returned.
executePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error
ExecutePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error
}
type poster interface {
@ -49,8 +53,8 @@ type defaultPodCommandExecutor struct {
streamExecutorFactory streamExecutorFactory
}
// NewPodCommandExecutor creates a new podCommandExecutor.
func NewPodCommandExecutor(restClientConfig *rest.Config, restClient poster) podCommandExecutor {
// NewPodCommandExecutor creates a new PodCommandExecutor.
func NewPodCommandExecutor(restClientConfig *rest.Config, restClient poster) PodCommandExecutor {
return &defaultPodCommandExecutor{
restClientConfig: restClientConfig,
restClient: restClient,
@ -59,11 +63,11 @@ func NewPodCommandExecutor(restClientConfig *rest.Config, restClient poster) pod
}
}
// executePodCommand uses the pod exec API to execute a command in a container in a pod. If the
// ExecutePodCommand uses the pod exec API to execute a command in a container in a pod. If the
// command takes longer than the specified timeout, an error is returned (NOTE: it is not currently
// possible to ensure the command is terminated when the timeout occurs, so it may continue to run
// in the background).
func (e *defaultPodCommandExecutor) executePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error {
func (e *defaultPodCommandExecutor) ExecutePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error {
if item == nil {
return errors.New("item is required")
}
@ -101,7 +105,7 @@ func (e *defaultPodCommandExecutor) executePodCommand(log logrus.FieldLogger, it
}
if hook.Timeout.Duration == 0 {
hook.Timeout.Duration = defaultHookTimeout
hook.Timeout.Duration = defaultTimeout
}
hookLog := log.WithFields(

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
package podexec
import (
"bytes"
@ -27,7 +27,6 @@ import (
"github.com/heptio/ark/pkg/apis/ark/v1"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
@ -82,7 +81,7 @@ func TestExecutePodCommandMissingInputs(t *testing.T) {
},
{
name: "container not found",
item: unstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object,
item: arktest.UnstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object,
podNamespace: "ns",
podName: "pod",
hookName: "hook",
@ -92,7 +91,7 @@ func TestExecutePodCommandMissingInputs(t *testing.T) {
},
{
name: "command missing",
item: unstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object,
item: arktest.UnstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object,
podNamespace: "ns",
podName: "pod",
hookName: "hook",
@ -105,7 +104,7 @@ func TestExecutePodCommandMissingInputs(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
e := &defaultPodCommandExecutor{}
err := e.executePodCommand(arktest.NewLogger(), test.item, test.podNamespace, test.podName, test.hookName, test.hook)
err := e.ExecutePodCommand(arktest.NewLogger(), test.item, test.podNamespace, test.podName, test.hookName, test.hook)
assert.Error(t, err)
})
}
@ -161,7 +160,7 @@ func TestExecutePodCommand(t *testing.T) {
Timeout: metav1.Duration{Duration: test.timeout},
}
pod, err := getAsMap(`
pod, err := arktest.GetAsMap(`
{
"metadata": {
"namespace": "namespace",
@ -209,7 +208,7 @@ func TestExecutePodCommand(t *testing.T) {
}
streamExecutor.On("Stream", expectedStreamOptions).Return(test.hookError)
err = podCommandExecutor.executePodCommand(arktest.NewLogger(), pod, "namespace", "name", "hookName", &hook)
err = podCommandExecutor.ExecutePodCommand(arktest.NewLogger(), pod, "namespace", "name", "hookName", &hook)
if test.expectedError != "" {
assert.EqualError(t, err, test.expectedError)
return
@ -265,12 +264,3 @@ func (p *mockPoster) Post() *rest.Request {
args := p.Called()
return args.Get(0).(*rest.Request)
}
type mockPodCommandExecutor struct {
mock.Mock
}
func (e *mockPodCommandExecutor) executePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *v1.ExecHook) error {
args := e.Called(log, item, namespace, name, hookName, hook)
return args.Error(0)
}

177
pkg/restic/backupper.go Normal file
View File

@ -0,0 +1,177 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
import (
"context"
"fmt"
"sync"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
arkv1api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/util/boolptr"
)
// Backupper can execute restic backups of volumes in a pod.
type Backupper interface {
	// BackupPodVolumes backs up all annotated volumes in a pod. It returns a
	// map of volume name -> restic snapshot ID for successful backups, plus
	// any errors encountered.
	BackupPodVolumes(backup *arkv1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) (map[string]string, []error)
}

// backupper implements Backupper by creating PodVolumeBackup resources and
// waiting for them to be marked completed or failed.
type backupper struct {
	repoManager *repositoryManager
	ctx         context.Context // bounds how long BackupPodVolumes waits for results

	// results maps "namespace/name" of a pod (see resultsKey) to the channel
	// on which that pod's completed/failed PodVolumeBackups are delivered.
	results     map[string]chan *arkv1api.PodVolumeBackup
	resultsLock sync.Mutex // guards results
}
// newBackupper returns a backupper that watches the given PodVolumeBackup
// informer and routes each completed or failed PodVolumeBackup to the result
// channel registered for its pod (if any).
func newBackupper(ctx context.Context, repoManager *repositoryManager, podVolumeBackupInformer cache.SharedIndexInformer) *backupper {
	b := &backupper{
		repoManager: repoManager,
		ctx:         ctx,
		results:     make(map[string]chan *arkv1api.PodVolumeBackup),
	}

	podVolumeBackupInformer.AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			UpdateFunc: func(_, obj interface{}) {
				pvb := obj.(*arkv1api.PodVolumeBackup)

				// Only terminal phases are delivered to waiters.
				if pvb.Status.Phase != arkv1api.PodVolumeBackupPhaseCompleted && pvb.Status.Phase != arkv1api.PodVolumeBackupPhaseFailed {
					return
				}

				b.resultsLock.Lock()
				defer b.resultsLock.Unlock()

				// Guard against updates for pods nobody is waiting on: the
				// map lookup for a missing key yields a nil channel, and a
				// send on a nil channel blocks forever — here it would do so
				// while holding resultsLock, deadlocking all future backups.
				resChan, ok := b.results[resultsKey(pvb.Spec.Pod.Namespace, pvb.Spec.Pod.Name)]
				if !ok {
					return
				}
				resChan <- pvb
			},
		},
	)

	return b
}
// resultsKey builds the "namespace/name" key used to index the backupper's
// results map by pod.
func resultsKey(ns, name string) string {
	return fmt.Sprintf("%s/%s", ns, name)
}
// BackupPodVolumes backs up all of the pod's annotated volumes by creating one
// PodVolumeBackup resource per volume and then waiting — bounded by b.ctx —
// for each to complete or fail. It returns a map of volume name -> restic
// snapshot ID for the successful backups, plus any errors encountered.
func (b *backupper) BackupPodVolumes(backup *arkv1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) (map[string]string, []error) {
	// get volumes to backup from pod's annotations
	volumesToBackup := GetVolumesToBackup(pod)
	if len(volumesToBackup) == 0 {
		return nil, nil
	}

	// ensure a repo exists for the pod's namespace
	if err := b.repoManager.ensureRepo(pod.Namespace); err != nil {
		return nil, []error{err}
	}

	// Register an (unbuffered) channel for this pod so the informer handler
	// installed in newBackupper can deliver terminal PodVolumeBackups to us.
	resultsChan := make(chan *arkv1api.PodVolumeBackup)

	b.resultsLock.Lock()
	b.results[resultsKey(pod.Namespace, pod.Name)] = resultsChan
	b.resultsLock.Unlock()

	var (
		errs            []error
		volumeSnapshots = make(map[string]string)
	)

	for _, volumeName := range volumesToBackup {
		// NOTE(review): Lock is called once per volume but the deferred
		// Unlocks all run at function return, so repoLocker must tolerate
		// repeated Lock calls for the same namespace from one goroutine —
		// confirm against repoLocker's semantics.
		b.repoManager.repoLocker.Lock(pod.Namespace)
		defer b.repoManager.repoLocker.Unlock(pod.Namespace)

		volumeBackup := newPodVolumeBackup(backup, pod, volumeName, b.repoManager.config.repoPrefix)

		if err := errorOnly(b.repoManager.arkClient.ArkV1().PodVolumeBackups(volumeBackup.Namespace).Create(volumeBackup)); err != nil {
			errs = append(errs, err)
			continue
		}

		// Track the volume as pending; its snapshot ID is filled in when the
		// corresponding PodVolumeBackup completes.
		volumeSnapshots[volumeName] = ""
	}

	// Wait for exactly one result per successfully-created PodVolumeBackup,
	// or bail out entirely once b.ctx is done.
ForEachVolume:
	for i, count := 0, len(volumeSnapshots); i < count; i++ {
		select {
		case <-b.ctx.Done():
			errs = append(errs, errors.New("timed out waiting for all PodVolumeBackups to complete"))
			break ForEachVolume
		case res := <-resultsChan:
			switch res.Status.Phase {
			case arkv1api.PodVolumeBackupPhaseCompleted:
				volumeSnapshots[res.Spec.Volume] = res.Status.SnapshotID
			case arkv1api.PodVolumeBackupPhaseFailed:
				errs = append(errs, errors.Errorf("pod volume backup failed: %s", res.Status.Message))
				delete(volumeSnapshots, res.Spec.Volume)
			}
		}
	}

	// Deregister the channel; late results for this pod are no longer wanted.
	b.resultsLock.Lock()
	delete(b.results, resultsKey(pod.Namespace, pod.Name))
	b.resultsLock.Unlock()

	return volumeSnapshots, errs
}
// newPodVolumeBackup assembles the PodVolumeBackup resource for a single
// volume of the given pod, owned by (and labeled with) the given backup.
func newPodVolumeBackup(backup *arkv1api.Backup, pod *corev1api.Pod, volumeName, repoPrefix string) *arkv1api.PodVolumeBackup {
	ownerRef := metav1.OwnerReference{
		APIVersion: arkv1api.SchemeGroupVersion.String(),
		Kind:       "Backup",
		Name:       backup.Name,
		UID:        backup.UID,
		Controller: boolptr.True(),
	}

	objectMeta := metav1.ObjectMeta{
		Namespace:       backup.Namespace,
		GenerateName:    backup.Name + "-",
		OwnerReferences: []metav1.OwnerReference{ownerRef},
		Labels: map[string]string{
			arkv1api.BackupNameLabel: backup.Name,
			arkv1api.BackupUIDLabel:  string(backup.UID),
		},
	}

	spec := arkv1api.PodVolumeBackupSpec{
		Node: pod.Spec.NodeName,
		Pod: corev1api.ObjectReference{
			Kind:      "Pod",
			Namespace: pod.Namespace,
			Name:      pod.Name,
			UID:       pod.UID,
		},
		Volume: volumeName,
		// Tags allow the restic snapshot to be located later by
		// backup/pod/volume identity.
		Tags: map[string]string{
			"backup":     backup.Name,
			"backup-uid": string(backup.UID),
			"pod":        pod.Name,
			"pod-uid":    string(pod.UID),
			"ns":         pod.Namespace,
			"volume":     volumeName,
		},
		RepoPrefix: repoPrefix,
	}

	return &arkv1api.PodVolumeBackup{
		ObjectMeta: objectMeta,
		Spec:       spec,
	}
}

72
pkg/restic/command.go Normal file
View File

@ -0,0 +1,72 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
import (
"fmt"
"os/exec"
"strings"
)
// Command represents a restic command line: the binary to invoke, the
// subcommand, the repository it targets, and any additional arguments/flags.
type Command struct {
	BaseName     string   // executable to run; defaults to /restic when empty
	Command      string   // restic subcommand, e.g. "backup" or "restore"
	RepoPrefix   string   // prefix of the repository location
	Repo         string   // repository name, appended to RepoPrefix
	PasswordFile string   // optional path passed via --password-file
	Args         []string // positional arguments
	ExtraFlags   []string // any additional flags, appended last
}

// StringSlice returns the command as a slice of strings, one argv element each.
func (c *Command) StringSlice() []string {
	baseName := c.BaseName
	if baseName == "" {
		baseName = "/restic"
	}

	parts := []string{baseName, c.Command, repoFlag(c.RepoPrefix, c.Repo)}
	if c.PasswordFile != "" {
		parts = append(parts, passwordFlag(c.PasswordFile))
	}
	parts = append(parts, c.Args...)

	return append(parts, c.ExtraFlags...)
}

// String returns the command as a single space-joined string.
func (c *Command) String() string {
	return strings.Join(c.StringSlice(), " ")
}

// Cmd returns an exec.Cmd for the command.
func (c *Command) Cmd() *exec.Cmd {
	argv := c.StringSlice()
	return exec.Command(argv[0], argv[1:]...)
}

// repoFlag renders the --repo flag for the given prefix/name pair.
func repoFlag(prefix, repo string) string {
	return fmt.Sprintf("--repo=%s/%s", prefix, repo)
}

// passwordFlag renders the --password-file flag.
func passwordFlag(file string) string {
	return fmt.Sprintf("--password-file=%s", file)
}

View File

@ -0,0 +1,91 @@
package restic
import (
"fmt"
"strings"
)
// BackupCommand returns a Command for running a restic backup of the given
// path, tagging the resulting snapshot with the provided key/value pairs.
func BackupCommand(repoPrefix, repo, passwordFile, path string, tags map[string]string) *Command {
	cmd := &Command{
		Command:      "backup",
		RepoPrefix:   repoPrefix,
		Repo:         repo,
		PasswordFile: passwordFile,
	}
	cmd.Args = []string{path}
	cmd.ExtraFlags = backupTagFlags(tags)

	return cmd
}
// backupTagFlags converts the tag map into restic --tag=key=value flags.
// A nil/empty map yields a nil slice.
func backupTagFlags(tags map[string]string) []string {
	var flags []string
	for key, value := range tags {
		flags = append(flags, fmt.Sprintf("--tag=%s=%s", key, value))
	}
	return flags
}
// RestoreCommand returns a Command for running a restic restore of the given
// snapshot into /restores/<podUID>.
func RestoreCommand(repoPrefix, repo, passwordFile, podUID, snapshotID string) *Command {
	target := fmt.Sprintf("--target=/restores/%s", podUID)

	return &Command{
		Command:      "restore",
		RepoPrefix:   repoPrefix,
		Repo:         repo,
		PasswordFile: passwordFile,
		Args:         []string{snapshotID},
		ExtraFlags:   []string{target},
	}
}
// GetSnapshotCommand returns a Command for running "restic snapshots",
// restricted via --last and a tag filter to the most recent snapshot matching
// the given tags, with JSON output.
func GetSnapshotCommand(repoPrefix, repo, passwordFile string, tags map[string]string) *Command {
	flags := []string{"--json", "--last", getSnapshotTagFlag(tags)}

	return &Command{
		Command:      "snapshots",
		RepoPrefix:   repoPrefix,
		Repo:         repo,
		PasswordFile: passwordFile,
		ExtraFlags:   flags,
	}
}
// getSnapshotTagFlag renders a single --tag flag whose value is the
// comma-joined key=value filters built from the tag map.
func getSnapshotTagFlag(tags map[string]string) string {
	filters := make([]string, 0, len(tags))
	for key, value := range tags {
		filters = append(filters, fmt.Sprintf("%s=%s", key, value))
	}
	return fmt.Sprintf("--tag=%s", strings.Join(filters, ","))
}
// InitCommand returns a Command for initializing (creating) a restic
// repository at repoPrefix/repo.
func InitCommand(repoPrefix, repo string) *Command {
	return &Command{
		Command:    "init",
		RepoPrefix: repoPrefix,
		Repo:       repo,
	}
}

// CheckCommand returns a Command for running a restic check against
// repoPrefix/repo.
func CheckCommand(repoPrefix, repo string) *Command {
	return &Command{
		Command:    "check",
		RepoPrefix: repoPrefix,
		Repo:       repo,
	}
}

// PruneCommand returns a Command for running a restic prune against
// repoPrefix/repo.
func PruneCommand(repoPrefix, repo string) *Command {
	return &Command{
		Command:    "prune",
		RepoPrefix: repoPrefix,
		Repo:       repo,
	}
}

// ForgetCommand returns a Command for running restic forget on the given
// snapshot ID in repoPrefix/repo.
func ForgetCommand(repoPrefix, repo, snapshotID string) *Command {
	return &Command{
		Command:    "forget",
		RepoPrefix: repoPrefix,
		Repo:       repo,
		Args:       []string{snapshotID},
	}
}

170
pkg/restic/common.go Normal file
View File

@ -0,0 +1,170 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
import (
"fmt"
"io/ioutil"
"strings"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
corev1listers "k8s.io/client-go/listers/core/v1"
arkv1api "github.com/heptio/ark/pkg/apis/ark/v1"
arkv1listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
)
const (
	// InitContainer is the name of the init container that is injected
	// into restored pods to block startup until restic restores finish.
	InitContainer = "restic-wait"

	// podAnnotationPrefix prefixes per-volume snapshot-ID annotations
	// on pods; the suffix is the volume name, the value the snapshot ID.
	podAnnotationPrefix = "snapshot.ark.heptio.com/"

	// volumesToBackupAnnotation holds a comma-separated list of volume
	// names that should be backed up with restic.
	volumesToBackupAnnotation = "backup.ark.heptio.com/backup-volumes"
)
// PodHasSnapshotAnnotation returns true if the object has an annotation
// indicating that there is a restic snapshot for a volume in this pod,
// or false otherwise.
func PodHasSnapshotAnnotation(obj metav1.Object) bool {
	annotations := obj.GetAnnotations()
	for name := range annotations {
		if strings.HasPrefix(name, podAnnotationPrefix) {
			return true
		}
	}
	return false
}
// GetPodSnapshotAnnotations returns a map, of volume name -> snapshot id,
// of all restic snapshots for this pod. Returns nil if the pod has no
// snapshot annotations.
func GetPodSnapshotAnnotations(obj metav1.Object) map[string]string {
	var snapshots map[string]string
	for key, value := range obj.GetAnnotations() {
		if !strings.HasPrefix(key, podAnnotationPrefix) {
			continue
		}
		if snapshots == nil {
			snapshots = make(map[string]string)
		}
		volumeName := strings.TrimPrefix(key, podAnnotationPrefix)
		snapshots[volumeName] = value
	}
	return snapshots
}
// SetPodSnapshotAnnotation adds an annotation to a pod to indicate that
// the specified volume has a restic snapshot with the provided id.
func SetPodSnapshotAnnotation(obj metav1.Object, volumeName, snapshotID string) {
	annotations := obj.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations[podAnnotationPrefix+volumeName] = snapshotID
	obj.SetAnnotations(annotations)
}
// GetVolumesToBackup returns the list of volume names to back up for the
// provided pod, parsed from its comma-separated backup-volumes annotation.
// Returns nil when the annotation is absent or empty.
func GetVolumesToBackup(obj metav1.Object) []string {
	annotations := obj.GetAnnotations()
	if annotations == nil {
		return nil
	}
	if value := annotations[volumesToBackupAnnotation]; value != "" {
		return strings.Split(value, ",")
	}
	return nil
}
// SnapshotIdentifier uniquely identifies a restic snapshot
// taken by Ark.
type SnapshotIdentifier struct {
	// Repo is the name of the restic repository where the
	// snapshot is located
	Repo string
	// SnapshotID is the short ID of the restic snapshot
	SnapshotID string
}
// GetSnapshotsInBackup returns a list of all restic snapshot ids associated with
// a given Ark backup.
func GetSnapshotsInBackup(backup *arkv1api.Backup, podVolumeBackupLister arkv1listers.PodVolumeBackupLister) ([]SnapshotIdentifier, error) {
	// Select all PodVolumeBackups labeled with this backup's name.
	selector, err := labels.Parse(fmt.Sprintf("%s=%s", arkv1api.BackupNameLabel, backup.Name))
	if err != nil {
		return nil, errors.WithStack(err)
	}
	podVolumeBackups, err := podVolumeBackupLister.List(selector)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	var res []SnapshotIdentifier
	for _, item := range podVolumeBackups {
		// Skip items that never recorded a snapshot ID (e.g. in-progress
		// or failed pod volume backups).
		if item.Status.SnapshotID == "" {
			continue
		}
		res = append(res, SnapshotIdentifier{
			// Repositories are keyed by the backed-up pod's namespace.
			Repo:       item.Spec.Pod.Namespace,
			SnapshotID: item.Status.SnapshotID,
		})
	}
	return res, nil
}
// TempCredentialsFile creates a temp file containing a restic
// encryption key for the given repo and returns its path. The
// caller should generally call os.Remove() to remove the file
// when done with it.
func TempCredentialsFile(secretLister corev1listers.SecretLister, repoName string) (string, error) {
	// Look the key up via the (cached) lister; repoName doubles as the
	// namespace holding the credentials secret.
	secretGetter := NewListerSecretGetter(secretLister)
	repoKey, err := GetRepositoryKey(secretGetter, repoName)
	if err != nil {
		return "", err
	}
	file, err := ioutil.TempFile("", fmt.Sprintf("%s-%s", CredentialsSecretName, repoName))
	if err != nil {
		return "", errors.WithStack(err)
	}
	if _, err := file.Write(repoKey); err != nil {
		// nothing we can do about an error closing the file here, and we're
		// already returning an error about the write failing.
		file.Close()
		return "", errors.WithStack(err)
	}
	name := file.Name()
	// Close errors matter here: the key may not be fully flushed to disk.
	if err := file.Close(); err != nil {
		return "", errors.WithStack(err)
	}
	return name, nil
}

View File

@ -0,0 +1,36 @@
package restic
import (
"encoding/json"
"os/exec"
"github.com/pkg/errors"
)
// GetSnapshotID runs a 'restic snapshots' command to get the ID of the snapshot
// in the specified repo matching the set of provided tags, or an error if a
// unique snapshot cannot be identified.
func GetSnapshotID(repoPrefix, repo, passwordFile string, tags map[string]string) (string, error) {
	output, err := GetSnapshotCommand(repoPrefix, repo, passwordFile, tags).Cmd().Output()
	if err != nil {
		// Include captured stderr when the process exited non-zero, since
		// restic writes its diagnostics there.
		if exitErr, ok := err.(*exec.ExitError); ok {
			return "", errors.Wrapf(err, "error running command, stderr=%s", exitErr.Stderr)
		}
		return "", errors.Wrap(err, "error running command")
	}
	// We only need the short_id field of each returned snapshot.
	type snapshotID struct {
		ShortID string `json:"short_id"`
	}
	var snapshots []snapshotID
	if err := json.Unmarshal(output, &snapshots); err != nil {
		return "", errors.Wrap(err, "error unmarshalling restic snapshots result")
	}
	// The command uses --last, so more than one result means the tag set
	// was ambiguous; zero means no matching snapshot exists.
	if len(snapshots) != 1 {
		return "", errors.Errorf("expected one matching snapshot, got %d", len(snapshots))
	}
	return snapshots[0].ShortID, nil
}

55
pkg/restic/repo_locker.go Normal file
View File

@ -0,0 +1,55 @@
package restic
import "sync"
// repoLocker manages exclusive/non-exclusive locks for
// operations against restic repositories. The semantics
// of exclusive/non-exclusive locks are the same as for
// a sync.RWMutex, where a non-exclusive lock is equivalent
// to a read lock, and an exclusive lock is equivalent to
// a write lock.
type repoLocker struct {
	// mu guards the locks map itself.
	mu sync.Mutex
	// locks holds one RWMutex per repository name, created lazily.
	locks map[string]*sync.RWMutex
}

// newRepoLocker returns a ready-to-use repoLocker.
func newRepoLocker() *repoLocker {
	locker := repoLocker{
		locks: map[string]*sync.RWMutex{},
	}
	return &locker
}

// LockExclusive acquires an exclusive lock for the specified
// repository. This function blocks until no other locks exist
// for the repo.
func (rl *repoLocker) LockExclusive(name string) {
	rl.ensureLock(name).Lock()
}

// Lock acquires a non-exclusive lock for the specified
// repository. This function blocks until no exclusive
// locks exist for the repo.
func (rl *repoLocker) Lock(name string) {
	rl.ensureLock(name).RLock()
}

// UnlockExclusive releases an exclusive lock for the repo.
func (rl *repoLocker) UnlockExclusive(name string) {
	rl.ensureLock(name).Unlock()
}

// Unlock releases a non-exclusive lock for the repo.
func (rl *repoLocker) Unlock(name string) {
	rl.ensureLock(name).RUnlock()
}

// ensureLock returns the RWMutex for the named repository,
// creating and caching it on first use.
func (rl *repoLocker) ensureLock(name string) *sync.RWMutex {
	rl.mu.Lock()
	defer rl.mu.Unlock()
	m, ok := rl.locks[name]
	if !ok {
		m = &sync.RWMutex{}
		rl.locks[name] = m
	}
	return m
}

View File

@ -0,0 +1,84 @@
package restic
import (
"github.com/pkg/errors"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
)
const (
	// CredentialsSecretName is the name of the secret, per namespace,
	// that stores the restic repository encryption key.
	CredentialsSecretName = "ark-restic-credentials"
	// CredentialsKey is the data key within the secret under which the
	// encryption key bytes are stored.
	CredentialsKey = "ark-restic-credentials"
)
// NewRepositoryKey creates the ark-restic-credentials secret in the given
// namespace, storing data as the repository encryption key. Returns an
// error if the secret already exists (Create, not CreateOrUpdate).
func NewRepositoryKey(secretClient corev1client.SecretsGetter, namespace string, data []byte) error {
	secret := &corev1api.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      CredentialsSecretName,
		},
		Type: corev1api.SecretTypeOpaque,
		Data: map[string][]byte{
			CredentialsKey: data,
		},
	}
	_, err := secretClient.Secrets(namespace).Create(secret)
	if err != nil {
		return errors.WithStack(err)
	}
	return nil
}
// SecretGetter abstracts fetching a secret so callers can use either a
// live API client or a cached lister.
type SecretGetter interface {
	GetSecret(namespace, name string) (*corev1api.Secret, error)
}
// clientSecretGetter fetches secrets directly from the API server.
type clientSecretGetter struct {
	client corev1client.SecretsGetter
}
// NewClientSecretGetter returns a SecretGetter backed by an API client.
func NewClientSecretGetter(client corev1client.SecretsGetter) SecretGetter {
	return &clientSecretGetter{client: client}
}
// GetSecret fetches the named secret from the API server.
func (c *clientSecretGetter) GetSecret(namespace, name string) (*corev1api.Secret, error) {
	secret, err := c.client.Secrets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return secret, nil
}
// listerSecretGetter fetches secrets from an informer cache.
type listerSecretGetter struct {
	lister corev1listers.SecretLister
}
// NewListerSecretGetter returns a SecretGetter backed by a cached lister.
func NewListerSecretGetter(lister corev1listers.SecretLister) SecretGetter {
	return &listerSecretGetter{lister: lister}
}
// GetSecret fetches the named secret from the lister's cache.
func (l *listerSecretGetter) GetSecret(namespace, name string) (*corev1api.Secret, error) {
	secret, err := l.lister.Secrets(namespace).Get(name)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return secret, nil
}
// GetRepositoryKey returns the restic encryption key stored in the
// ark-restic-credentials secret of the given namespace (restic repos are
// keyed by namespace), or an error if the secret or data key is missing.
func GetRepositoryKey(secretGetter SecretGetter, namespace string) ([]byte, error) {
	secret, err := secretGetter.GetSecret(namespace, CredentialsSecretName)
	if err != nil {
		return nil, err
	}
	key, found := secret.Data[CredentialsKey]
	if !found {
		return nil, errors.Errorf("%q secret is missing data for key %q", CredentialsSecretName, CredentialsKey)
	}
	return key, nil
}

View File

@ -0,0 +1,350 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
import (
"context"
"fmt"
"os"
"os/exec"
"strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kerrs "k8s.io/apimachinery/pkg/util/errors"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
arkv1api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
clientset "github.com/heptio/ark/pkg/generated/clientset/versioned"
arkv1informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
"github.com/heptio/ark/pkg/util/sync"
)
// RepositoryManager executes commands against restic repositories.
type RepositoryManager interface {
	// CheckRepo checks the specified repo for errors.
	CheckRepo(name string) error
	// CheckAllRepos checks all repos for errors.
	CheckAllRepos() error
	// PruneRepo deletes unused data from a repo.
	PruneRepo(name string) error
	// PruneAllRepos deletes unused data from all
	// repos.
	PruneAllRepos() error
	// Forget removes a snapshot from the list of
	// available snapshots in a repo.
	Forget(snapshot SnapshotIdentifier) error
	BackupperFactory
	RestorerFactory
}
// BackupperFactory can construct restic backuppers.
type BackupperFactory interface {
	// NewBackupper returns a restic backupper for use during a single
	// Ark backup.
	NewBackupper(context.Context, *arkv1api.Backup) (Backupper, error)
}
// RestorerFactory can construct restic restorers.
type RestorerFactory interface {
	// NewRestorer returns a restic restorer for use during a single
	// Ark restore.
	NewRestorer(context.Context, *arkv1api.Restore) (Restorer, error)
}
// BackendType identifies which object storage provider hosts the
// restic repositories; it selects the restic repo URL scheme.
type BackendType string
const (
	AWSBackend BackendType = "aws"
	AzureBackend BackendType = "azure"
	GCPBackend BackendType = "gcp"
)
// repositoryManager is the concrete RepositoryManager implementation.
type repositoryManager struct {
	objectStore cloudprovider.ObjectStore // used to enumerate repos in the bucket
	config config                        // parsed restic location/prefix settings
	arkClient clientset.Interface
	secretsLister corev1listers.SecretLister // cached access to credential secrets
	secretsClient corev1client.SecretsGetter
	log logrus.FieldLogger
	repoLocker *repoLocker // serializes exclusive vs. shared repo operations
}
// config holds the parsed restic storage location settings.
type config struct {
	repoPrefix string // provider-specific restic repo URL prefix
	bucket string     // object storage bucket holding the repos
	path string       // optional sub-path within the bucket
}
// getConfig translates the server's object storage configuration into
// the bucket/path/repo-prefix settings restic needs. ResticLocation is
// expected to be of the form "bucket" or "bucket/path".
func getConfig(objectStorageConfig arkv1api.ObjectStorageProviderConfig) config {
	var (
		c = config{}
		parts = strings.SplitN(objectStorageConfig.ResticLocation, "/", 2)
	)
	// NOTE(review): strings.SplitN always returns at least one element,
	// so the len==0 case is unreachable; kept for defensiveness.
	switch len(parts) {
	case 0:
	case 1:
		c.bucket = parts[0]
	default:
		c.bucket = parts[0]
		c.path = parts[1]
	}
	// Build the provider-specific restic repository URL prefix.
	switch BackendType(objectStorageConfig.Name) {
	case AWSBackend:
		var url string
		switch {
		// non-AWS, S3-compatible object store
		case objectStorageConfig.Config != nil && objectStorageConfig.Config["s3Url"] != "":
			url = objectStorageConfig.Config["s3Url"]
		default:
			url = "s3.amazonaws.com"
		}
		c.repoPrefix = fmt.Sprintf("s3:%s/%s", url, c.bucket)
		if c.path != "" {
			c.repoPrefix += "/" + c.path
		}
	case AzureBackend:
		c.repoPrefix = fmt.Sprintf("azure:%s:/%s", c.bucket, c.path)
	case GCPBackend:
		c.repoPrefix = fmt.Sprintf("gs:%s:/%s", c.bucket, c.path)
	}
	return c
}
// NewRepositoryManager constructs a RepositoryManager. It blocks until
// the secrets informer cache has synced (or ctx is cancelled), since
// repository credentials are read from the cache.
func NewRepositoryManager(
	ctx context.Context,
	objectStore cloudprovider.ObjectStore,
	config arkv1api.ObjectStorageProviderConfig,
	arkClient clientset.Interface,
	secretsInformer cache.SharedIndexInformer,
	secretsClient corev1client.SecretsGetter,
	log logrus.FieldLogger,
) (RepositoryManager, error) {
	rm := &repositoryManager{
		objectStore: objectStore,
		config: getConfig(config),
		arkClient: arkClient,
		secretsLister: corev1listers.NewSecretLister(secretsInformer.GetIndexer()),
		secretsClient: secretsClient,
		log: log,
		repoLocker: newRepoLocker(),
	}
	if !cache.WaitForCacheSync(ctx.Done(), secretsInformer.HasSynced) {
		return nil, errors.New("timed out waiting for cache to sync")
	}
	return rm, nil
}
// NewBackupper returns a Backupper scoped to a single Ark backup. It
// starts a PodVolumeBackup informer filtered to this backup's UID so the
// backupper can watch for completion of the volume backups it creates,
// and blocks until that informer's cache syncs (or ctx is cancelled).
func (rm *repositoryManager) NewBackupper(ctx context.Context, backup *arkv1api.Backup) (Backupper, error) {
	informer := arkv1informers.NewFilteredPodVolumeBackupInformer(
		rm.arkClient,
		backup.Namespace,
		0,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(opts *metav1.ListOptions) {
			opts.LabelSelector = fmt.Sprintf("%s=%s", arkv1api.BackupUIDLabel, backup.UID)
		},
	)
	b := newBackupper(ctx, rm, informer)
	// The informer runs for the lifetime of ctx (i.e. this backup).
	go informer.Run(ctx.Done())
	if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
		return nil, errors.New("timed out waiting for cache to sync")
	}
	return b, nil
}
// NewRestorer returns a Restorer scoped to a single Ark restore,
// mirroring NewBackupper: it starts a PodVolumeRestore informer filtered
// to this restore's UID and waits for its cache to sync.
func (rm *repositoryManager) NewRestorer(ctx context.Context, restore *arkv1api.Restore) (Restorer, error) {
	informer := arkv1informers.NewFilteredPodVolumeRestoreInformer(
		rm.arkClient,
		restore.Namespace,
		0,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(opts *metav1.ListOptions) {
			opts.LabelSelector = fmt.Sprintf("%s=%s", arkv1api.RestoreUIDLabel, restore.UID)
		},
	)
	r := newRestorer(ctx, rm, informer)
	// The informer runs for the lifetime of ctx (i.e. this restore).
	go informer.Run(ctx.Done())
	if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
		return nil, errors.New("timed out waiting for cache to sync")
	}
	return r, nil
}
// ensureRepo initializes the named restic repository if it does not
// already exist in object storage; it is a no-op if the repo exists.
// NOTE(review): the existence check and init are not atomic across
// processes — two concurrent callers could both attempt 'restic init';
// the repoLocker only serializes within this process.
func (rm *repositoryManager) ensureRepo(name string) error {
	repos, err := rm.getAllRepos()
	if err != nil {
		return err
	}
	for _, repo := range repos {
		if repo == name {
			return nil
		}
	}
	rm.repoLocker.LockExclusive(name)
	defer rm.repoLocker.UnlockExclusive(name)
	// init the repo
	cmd := InitCommand(rm.config.repoPrefix, name)
	return errorOnly(rm.exec(cmd))
}
// getAllRepos lists restic repository names by enumerating the
// top-level prefixes ("directories") in the configured bucket.
func (rm *repositoryManager) getAllRepos() ([]string, error) {
	// TODO support rm.config.path
	prefixes, err := rm.objectStore.ListCommonPrefixes(rm.config.bucket, "/")
	if err != nil {
		return nil, err
	}
	var repos []string
	for _, prefix := range prefixes {
		// Skip empty/root prefixes such as "/".
		if len(prefix) <= 1 {
			continue
		}
		// strip the trailing '/' if it exists
		repos = append(repos, strings.TrimSuffix(prefix, "/"))
	}
	return repos, nil
}
// CheckAllRepos runs 'restic check' concurrently against every repo in
// the bucket and aggregates any errors.
func (rm *repositoryManager) CheckAllRepos() error {
	repos, err := rm.getAllRepos()
	if err != nil {
		return err
	}
	var eg sync.ErrorGroup
	for _, repo := range repos {
		// capture the loop variable for the goroutine closure
		this := repo
		eg.Go(func() error {
			rm.log.WithField("repo", this).Debugf("Checking repo %s", this)
			return rm.CheckRepo(this)
		})
	}
	return kerrs.NewAggregate(eg.Wait())
}
// PruneAllRepos concurrently runs, per repo, a check, then a prune, then
// a post-prune check, aggregating any errors across repos.
func (rm *repositoryManager) PruneAllRepos() error {
	repos, err := rm.getAllRepos()
	if err != nil {
		return err
	}
	var eg sync.ErrorGroup
	for _, repo := range repos {
		// capture the loop variable for the goroutine closure
		this := repo
		eg.Go(func() error {
			rm.log.WithField("repo", this).Debugf("Pre-prune checking repo %s", this)
			if err := rm.CheckRepo(this); err != nil {
				return err
			}
			rm.log.WithField("repo", this).Debugf("Pruning repo %s", this)
			if err := rm.PruneRepo(this); err != nil {
				return err
			}
			rm.log.WithField("repo", this).Debugf("Post-prune checking repo %s", this)
			return rm.CheckRepo(this)
		})
	}
	return kerrs.NewAggregate(eg.Wait())
}
// CheckRepo runs 'restic check' against the named repo, holding the
// exclusive (write) lock for the duration.
func (rm *repositoryManager) CheckRepo(name string) error {
	rm.repoLocker.LockExclusive(name)
	defer rm.repoLocker.UnlockExclusive(name)
	cmd := CheckCommand(rm.config.repoPrefix, name)
	return errorOnly(rm.exec(cmd))
}
// PruneRepo runs 'restic prune' against the named repo, holding the
// exclusive (write) lock for the duration.
func (rm *repositoryManager) PruneRepo(name string) error {
	rm.repoLocker.LockExclusive(name)
	defer rm.repoLocker.UnlockExclusive(name)
	cmd := PruneCommand(rm.config.repoPrefix, name)
	return errorOnly(rm.exec(cmd))
}
// Forget removes the given snapshot from its repo's snapshot list,
// holding the exclusive (write) lock for the duration.
func (rm *repositoryManager) Forget(snapshot SnapshotIdentifier) error {
	rm.repoLocker.LockExclusive(snapshot.Repo)
	defer rm.repoLocker.UnlockExclusive(snapshot.Repo)
	cmd := ForgetCommand(rm.config.repoPrefix, snapshot.Repo, snapshot.SnapshotID)
	return errorOnly(rm.exec(cmd))
}
// exec materializes the repo's encryption key into a temp password file,
// injects it into cmd, runs the command, and returns its stdout. stderr
// is included in the error when the process exits non-zero. Callers are
// expected to hold the appropriate repo lock.
func (rm *repositoryManager) exec(cmd *Command) ([]byte, error) {
	file, err := TempCredentialsFile(rm.secretsLister, cmd.Repo)
	if err != nil {
		return nil, err
	}
	// ignore error since there's nothing we can do and it's a temp file.
	defer os.Remove(file)
	cmd.PasswordFile = file
	output, err := cmd.Cmd().Output()
	rm.log.WithField("repository", cmd.Repo).Debugf("Ran restic command=%q, output=%s", cmd.String(), output)
	if err != nil {
		// restic writes diagnostics to stderr; surface them in the error.
		if exitErr, ok := err.(*exec.ExitError); ok {
			return nil, errors.Wrapf(err, "error running command, stderr=%s", exitErr.Stderr)
		}
		return nil, errors.Wrap(err, "error running command")
	}
	return output, nil
}
// errorOnly discards the first value of a two-valued call and passes
// through only its error, so expressions like errorOnly(rm.exec(cmd))
// can be returned directly.
func errorOnly(_ interface{}, err error) error {
	return err
}

155
pkg/restic/restorer.go Normal file
View File

@ -0,0 +1,155 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
import (
"context"
"sync"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
arkv1api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/util/boolptr"
)
// Restorer can execute restic restores of volumes in a pod.
type Restorer interface {
	// RestorePodVolumes restores all annotated volumes in a pod.
	RestorePodVolumes(restore *arkv1api.Restore, pod *corev1api.Pod, log logrus.FieldLogger) []error
}
// restorer is the concrete Restorer; it correlates PodVolumeRestore
// status updates (seen via informer) back to in-flight RestorePodVolumes
// calls through per-pod result channels.
type restorer struct {
	ctx context.Context
	repoManager *repositoryManager
	resultsLock sync.Mutex // guards results
	results map[string]chan *arkv1api.PodVolumeRestore // keyed by pod namespace/name
}
// newRestorer wires an informer event handler that forwards each
// completed/failed PodVolumeRestore to the channel registered for its pod.
func newRestorer(ctx context.Context, rm *repositoryManager, podVolumeRestoreInformer cache.SharedIndexInformer) *restorer {
	r := &restorer{
		ctx: ctx,
		repoManager: rm,
		results: make(map[string]chan *arkv1api.PodVolumeRestore),
	}
	podVolumeRestoreInformer.AddEventHandler(
		cache.ResourceEventHandlerFuncs{
			UpdateFunc: func(_, obj interface{}) {
				pvr := obj.(*arkv1api.PodVolumeRestore)
				if pvr.Status.Phase == arkv1api.PodVolumeRestorePhaseCompleted || pvr.Status.Phase == arkv1api.PodVolumeRestorePhaseFailed {
					// NOTE(review): this sends on the channel while holding
					// resultsLock, and a lookup for a pod whose entry was
					// already deleted yields a nil channel, on which a send
					// blocks forever — confirm an update can't arrive after
					// RestorePodVolumes removes its entry.
					r.resultsLock.Lock()
					r.results[resultsKey(pvr.Spec.Pod.Namespace, pvr.Spec.Pod.Name)] <- pvr
					r.resultsLock.Unlock()
				}
			},
		},
	)
	return r
}
// RestorePodVolumes creates a PodVolumeRestore for every volume recorded
// in the pod's snapshot annotations, then waits for each to complete,
// fail, or for the restorer's context to expire. Returns all errors
// encountered, or nil if the pod has no snapshot annotations.
func (r *restorer) RestorePodVolumes(restore *arkv1api.Restore, pod *corev1api.Pod, log logrus.FieldLogger) []error {
	// get volumes to restore from pod's annotations
	volumesToRestore := GetPodSnapshotAnnotations(pod)
	if len(volumesToRestore) == 0 {
		return nil
	}
	// Register a channel so the informer handler (see newRestorer) can
	// deliver this pod's PodVolumeRestore completions to us.
	resultsChan := make(chan *arkv1api.PodVolumeRestore)
	r.resultsLock.Lock()
	r.results[resultsKey(pod.Namespace, pod.Name)] = resultsChan
	r.resultsLock.Unlock()
	var (
		errs []error
		numRestores int
	)
	for volume, snapshot := range volumesToRestore {
		// NOTE(review): deferred Unlock inside a loop — every non-exclusive
		// lock on this namespace's repo is held until the function returns,
		// and the same repo is read-locked once per volume. Safe for an
		// RWMutex, but blocks exclusive ops (check/prune) for the duration.
		r.repoManager.repoLocker.Lock(pod.Namespace)
		defer r.repoManager.repoLocker.Unlock(pod.Namespace)
		volumeRestore := newPodVolumeRestore(restore, pod, volume, snapshot, r.repoManager.config.repoPrefix)
		if err := errorOnly(r.repoManager.arkClient.ArkV1().PodVolumeRestores(volumeRestore.Namespace).Create(volumeRestore)); err != nil {
			errs = append(errs, errors.WithStack(err))
			continue
		}
		numRestores++
	}
ForEachVolume:
	// Wait for one result per successfully-created PodVolumeRestore.
	for i := 0; i < numRestores; i++ {
		select {
		case <-r.ctx.Done():
			errs = append(errs, errors.New("timed out waiting for all PodVolumeRestores to complete"))
			break ForEachVolume
		case res := <-resultsChan:
			if res.Status.Phase == arkv1api.PodVolumeRestorePhaseFailed {
				errs = append(errs, errors.Errorf("pod volume restore failed: %s", res.Status.Message))
			}
		}
	}
	r.resultsLock.Lock()
	delete(r.results, resultsKey(pod.Namespace, pod.Name))
	r.resultsLock.Unlock()
	return errs
}
// newPodVolumeRestore builds a PodVolumeRestore CR for one volume of a
// pod, owned by (and labeled with) the Ark restore that requested it.
func newPodVolumeRestore(restore *arkv1api.Restore, pod *corev1api.Pod, volume, snapshot, repoPrefix string) *arkv1api.PodVolumeRestore {
	return &arkv1api.PodVolumeRestore{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: restore.Namespace,
			GenerateName: restore.Name + "-",
			// Owned by the restore so it's garbage-collected with it.
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: arkv1api.SchemeGroupVersion.String(),
					Kind: "Restore",
					Name: restore.Name,
					UID: restore.UID,
					Controller: boolptr.True(),
				},
			},
			// Labels used by informer selectors and lookups elsewhere.
			Labels: map[string]string{
				arkv1api.RestoreNameLabel: restore.Name,
				arkv1api.RestoreUIDLabel: string(restore.UID),
				arkv1api.PodUIDLabel: string(pod.UID),
			},
		},
		Spec: arkv1api.PodVolumeRestoreSpec{
			Pod: corev1api.ObjectReference{
				Kind: "Pod",
				Namespace: pod.Namespace,
				Name: pod.Name,
				UID: pod.UID,
			},
			Volume: volume,
			SnapshotID: snapshot,
			RepoPrefix: repoPrefix,
		},
	}
}

View File

@ -1,90 +0,0 @@
/*
Copyright 2017 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restore
import (
"time"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
)
// how long should we wait for certain objects (e.g. PVs, PVCs) to reach
// their specified conditions before continuing on.
const objectCreateWaitTimeout = 30 * time.Second
// resourceWaiter knows how to wait for a set of registered items to become "ready" (according
// to a provided readyFunc) based on listening to a channel of Events. The correct usage
// of this struct is to construct it, register all of the desired items to wait for via
// RegisterItem, and then to Wait() for them to become ready or the timeout to be exceeded.
type resourceWaiter struct {
	itemWatch watch.Interface
	watchChan <-chan watch.Event
	items sets.String // names still waiting to become ready
	readyFunc func(runtime.Unstructured) bool
}
// newResourceWaiter constructs a resourceWaiter listening on itemWatch.
func newResourceWaiter(itemWatch watch.Interface, readyFunc func(runtime.Unstructured) bool) *resourceWaiter {
	return &resourceWaiter{
		itemWatch: itemWatch,
		watchChan: itemWatch.ResultChan(),
		items: sets.NewString(),
		readyFunc: readyFunc,
	}
}
// RegisterItem adds the specified key to a list of items to listen for events for.
func (rw *resourceWaiter) RegisterItem(key string) {
	rw.items.Insert(key)
}
// Wait listens for events on the watchChan related to items that have been registered,
// and returns when either all of them have become ready according to readyFunc, or when
// the timeout has been exceeded.
func (rw *resourceWaiter) Wait() error {
	for {
		if rw.items.Len() <= 0 {
			return nil
		}
		// NOTE(review): a fresh timer is created every iteration, so the
		// timeout is per-event, not overall — total wait can exceed
		// objectCreateWaitTimeout if events keep arriving. Timers are
		// also never Stop()ed.
		timeout := time.NewTimer(objectCreateWaitTimeout)
		select {
		case event := <-rw.watchChan:
			obj, ok := event.Object.(*unstructured.Unstructured)
			if !ok {
				return errors.Errorf("Unexpected type %T", event.Object)
			}
			// Mark an item done once it's been added/modified and readyFunc
			// reports it ready.
			if event.Type == watch.Added || event.Type == watch.Modified {
				if rw.items.Has(obj.GetName()) && rw.readyFunc(obj) {
					rw.items.Delete(obj.GetName())
				}
			}
		case <-timeout.C:
			return errors.New("failed to observe all items becoming ready within the timeout")
		}
	}
}
// Stop terminates the underlying watch.
func (rw *resourceWaiter) Stop() {
	rw.itemWatch.Stop()
}

View File

@ -0,0 +1,111 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restore
import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/kube"
)
// resticRestoreAction is a restore ItemAction that injects a restic-wait
// init container into pods that have restic snapshot annotations.
type resticRestoreAction struct {
	logger logrus.FieldLogger
}
// NewResticRestoreAction constructs a resticRestoreAction.
func NewResticRestoreAction(logger logrus.FieldLogger) ItemAction {
	return &resticRestoreAction{
		logger: logger,
	}
}
// AppliesTo limits this action to pod resources.
func (a *resticRestoreAction) AppliesTo() (ResourceSelector, error) {
	return ResourceSelector{
		IncludedResources: []string{"pods"},
	}, nil
}
// Execute inspects the pod being restored for restic snapshot
// annotations; if any are present, it prepends (or replaces) an init
// container that mounts each snapshotted volume under /restores/<volume>
// and blocks pod startup until the restic restores complete.
// Returns the (possibly modified) pod, per-item warning, and error.
func (a *resticRestoreAction) Execute(obj runtime.Unstructured, restore *api.Restore) (runtime.Unstructured, error, error) {
	a.logger.Info("Executing resticRestoreAction")
	defer a.logger.Info("Done executing resticRestoreAction")
	var pod corev1.Pod
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &pod); err != nil {
		return nil, nil, errors.Wrap(err, "unable to convert pod from runtime.Unstructured")
	}
	log := a.logger.WithField("pod", kube.NamespaceAndName(&pod))
	volumeSnapshots := restic.GetPodSnapshotAnnotations(&pod)
	if len(volumeSnapshots) == 0 {
		// Nothing to restore via restic; return the pod unmodified.
		log.Debug("No restic snapshot ID annotations found")
		return obj, nil, nil
	}
	log.Info("Restic snapshot ID annotations found")
	// TODO(review): consider making the image configurable and pinning a
	// specific tag; ":latest" makes restores non-reproducible.
	initContainer := corev1.Container{
		Name:  restic.InitContainer,
		Image: "gcr.io/heptio-images/restic-init-container:latest",
		// The init container uses the restore UID to find its
		// PodVolumeRestores.
		Args: []string{string(restore.UID)},
		Env: []corev1.EnvVar{
			{
				Name: "POD_NAMESPACE",
				ValueFrom: &corev1.EnvVarSource{
					FieldRef: &corev1.ObjectFieldSelector{
						FieldPath: "metadata.namespace",
					},
				},
			},
			{
				Name: "POD_NAME",
				ValueFrom: &corev1.EnvVarSource{
					FieldRef: &corev1.ObjectFieldSelector{
						FieldPath: "metadata.name",
					},
				},
			},
		},
	}
	// Mount every snapshotted volume so the init container can move the
	// restored data into place.
	for volumeName := range volumeSnapshots {
		mount := corev1.VolumeMount{
			Name:      volumeName,
			MountPath: "/restores/" + volumeName,
		}
		initContainer.VolumeMounts = append(initContainer.VolumeMounts, mount)
	}
	// Use the shared restic.InitContainer constant (previously a duplicated
	// "restic-wait" string literal) so the name check stays consistent with
	// the container we inject and with the rest of the codebase.
	if len(pod.Spec.InitContainers) == 0 || pod.Spec.InitContainers[0].Name != restic.InitContainer {
		pod.Spec.InitContainers = append([]corev1.Container{initContainer}, pod.Spec.InitContainers...)
	} else {
		// An earlier restore already injected the container; replace it.
		pod.Spec.InitContainers[0] = initContainer
	}
	res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pod)
	if err != nil {
		return nil, nil, errors.Wrap(err, "unable to convert pod to runtime.Unstructured")
	}
	return &unstructured.Unstructured{Object: res}, nil, nil
}

View File

@ -19,6 +19,7 @@ package restore
import (
"archive/tar"
"compress/gzip"
go_context "context"
"encoding/json"
"fmt"
"io"
@ -26,6 +27,8 @@ import (
"os"
"path/filepath"
"sort"
"sync"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -38,7 +41,9 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
api "github.com/heptio/ark/pkg/apis/ark/v1"
@ -47,10 +52,12 @@ import (
"github.com/heptio/ark/pkg/discovery"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/boolptr"
"github.com/heptio/ark/pkg/util/collections"
"github.com/heptio/ark/pkg/util/kube"
"github.com/heptio/ark/pkg/util/logging"
arksync "github.com/heptio/ark/pkg/util/sync"
)
// Restorer knows how to restore a backup.
@ -64,15 +71,17 @@ type kindString string
// kubernetesRestorer implements Restorer for restoring into a Kubernetes cluster.
type kubernetesRestorer struct {
discoveryHelper discovery.Helper
dynamicFactory client.DynamicFactory
backupService cloudprovider.BackupService
snapshotService cloudprovider.SnapshotService
backupClient arkv1client.BackupsGetter
namespaceClient corev1.NamespaceInterface
resourcePriorities []string
fileSystem FileSystem
logger logrus.FieldLogger
discoveryHelper discovery.Helper
dynamicFactory client.DynamicFactory
backupService cloudprovider.BackupService
snapshotService cloudprovider.SnapshotService
backupClient arkv1client.BackupsGetter
namespaceClient corev1.NamespaceInterface
resticRestorerFactory restic.RestorerFactory
resticTimeout time.Duration
resourcePriorities []string
fileSystem FileSystem
logger logrus.FieldLogger
}
// prioritizeResources returns an ordered, fully-resolved list of resources to restore based on
@ -142,18 +151,22 @@ func NewKubernetesRestorer(
resourcePriorities []string,
backupClient arkv1client.BackupsGetter,
namespaceClient corev1.NamespaceInterface,
resticRestorerFactory restic.RestorerFactory,
resticTimeout time.Duration,
logger logrus.FieldLogger,
) (Restorer, error) {
return &kubernetesRestorer{
discoveryHelper: discoveryHelper,
dynamicFactory: dynamicFactory,
backupService: backupService,
snapshotService: snapshotService,
backupClient: backupClient,
namespaceClient: namespaceClient,
resourcePriorities: resourcePriorities,
fileSystem: &osFileSystem{},
logger: logger,
discoveryHelper: discoveryHelper,
dynamicFactory: dynamicFactory,
backupService: backupService,
snapshotService: snapshotService,
backupClient: backupClient,
namespaceClient: namespaceClient,
resticRestorerFactory: resticRestorerFactory,
resticTimeout: resticTimeout,
resourcePriorities: resourcePriorities,
fileSystem: &osFileSystem{},
logger: logger,
}, nil
}
@ -195,7 +208,28 @@ func (kr *kubernetesRestorer) Restore(restore *api.Restore, backup *api.Backup,
return api.RestoreResult{}, api.RestoreResult{Ark: []string{err.Error()}}
}
ctx := &context{
podVolumeTimeout := kr.resticTimeout
if val := restore.Annotations[api.PodVolumeOperationTimeoutAnnotation]; val != "" {
parsed, err := time.ParseDuration(val)
if err != nil {
log.WithError(errors.WithStack(err)).Errorf("Unable to parse pod volume timeout annotation %s, using server value.", val)
} else {
podVolumeTimeout = parsed
}
}
ctx, cancelFunc := go_context.WithTimeout(go_context.Background(), podVolumeTimeout)
defer cancelFunc()
var resticRestorer restic.Restorer
if kr.resticRestorerFactory != nil {
resticRestorer, err = kr.resticRestorerFactory.NewRestorer(ctx, restore)
if err != nil {
return api.RestoreResult{}, api.RestoreResult{Ark: []string{err.Error()}}
}
}
restoreCtx := &context{
backup: backup,
backupReader: backupReader,
restore: restore,
@ -207,10 +241,10 @@ func (kr *kubernetesRestorer) Restore(restore *api.Restore, backup *api.Backup,
namespaceClient: kr.namespaceClient,
actions: resolvedActions,
snapshotService: kr.snapshotService,
waitForPVs: true,
resticRestorer: resticRestorer,
}
return ctx.execute()
return restoreCtx.execute()
}
// getResourceIncludesExcludes takes the lists of resources to include and exclude, uses the
@ -286,7 +320,10 @@ type context struct {
namespaceClient corev1.NamespaceInterface
actions []resolvedAction
snapshotService cloudprovider.SnapshotService
waitForPVs bool
resticRestorer restic.Restorer
globalWaitGroup arksync.ErrorGroup
resourceWaitGroup sync.WaitGroup
resourceWatches []watch.Interface
}
func (ctx *context) infof(msg string, args ...interface{}) {
@ -342,6 +379,16 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe
existingNamespaces := sets.NewString()
// TODO this is not optimal since it'll keep watches open for all resources/namespaces
// until the very end of the restore. This should be done per resource type. Deferring
// refactoring for now since this may be able to be removed entirely if we eliminate
// waiting for PV snapshot restores.
defer func() {
for _, watch := range ctx.resourceWatches {
watch.Stop()
}
}()
for _, resource := range ctx.prioritizedResources {
// we don't want to explicitly restore namespace API objs because we'll handle
// them as a special case prior to restoring anything into them
@ -424,6 +471,23 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe
merge(&warnings, &w)
merge(&errs, &e)
}
// TODO timeout?
ctx.logger.Debugf("Waiting on resource wait group for resource=%s", resource.String())
ctx.resourceWaitGroup.Wait()
ctx.logger.Debugf("Done waiting on resource wait group for resource=%s", resource.String())
}
// TODO timeout?
ctx.logger.Debug("Waiting on global wait group")
waitErrs := ctx.globalWaitGroup.Wait()
ctx.logger.Debug("Done waiting on global wait group")
for _, err := range waitErrs {
// TODO not ideal to be adding these to Ark-level errors
// rather than a specific namespace, but don't have a way
// to track the namespace right now.
errs.Ark = append(errs.Ark, err.Error())
}
return warnings, errs
@ -524,9 +588,9 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
var (
resourceClient client.Dynamic
waiter *resourceWaiter
groupResource = schema.ParseGroupResource(resource)
applicableActions []resolvedAction
resourceWatch watch.Interface
)
// pre-filter the actions based on namespace & resource includes/excludes since
@ -556,8 +620,12 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
}
if hasControllerOwner(obj.GetOwnerReferences()) {
ctx.infof("%s/%s has a controller owner - skipping", obj.GetNamespace(), obj.GetName())
continue
// non-pods with controller owners shouldn't be restored; pods with controller
// owners should only be restored if they have restic snapshots to restore
if groupResource != kuberesource.Pods || !restic.PodHasSnapshotAnnotation(obj) {
ctx.infof("%s has a controller owner - skipping", kube.NamespaceAndName(obj))
continue
}
}
complete, err := isCompleted(obj, groupResource)
@ -597,16 +665,23 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
}
obj = updatedObj
// wait for the PV to be ready
if ctx.waitForPVs {
pvWatch, err := resourceClient.Watch(metav1.ListOptions{})
if resourceWatch == nil {
resourceWatch, err = resourceClient.Watch(metav1.ListOptions{})
if err != nil {
addToResult(&errs, namespace, fmt.Errorf("error watching for namespace %q, resource %q: %v", namespace, &groupResource, err))
return warnings, errs
}
ctx.resourceWatches = append(ctx.resourceWatches, resourceWatch)
waiter = newResourceWaiter(pvWatch, isPVReady)
defer waiter.Stop()
ctx.resourceWaitGroup.Add(1)
go func() {
defer ctx.resourceWaitGroup.Done()
if _, err := waitForReady(resourceWatch.ResultChan(), obj.GetName(), isPVReady, time.Minute, ctx.logger); err != nil {
ctx.logger.Warnf("Timeout reached waiting for persistent volume %s to become ready", obj.GetName())
addArkError(&warnings, fmt.Errorf("timeout reached waiting for persistent volume %s to become ready", obj.GetName()))
}
}()
}
}
@ -655,7 +730,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
addLabel(obj, api.RestoreLabelKey, ctx.restore.Name)
ctx.infof("Restoring %s: %v", obj.GroupVersionKind().Kind, obj.GetName())
_, restoreErr := resourceClient.Create(obj)
createdObj, restoreErr := resourceClient.Create(obj)
if apierrors.IsAlreadyExists(restoreErr) {
equal := false
if fromCluster, err := resourceClient.Get(obj.GetName(), metav1.GetOptions{}); err == nil {
@ -680,20 +755,71 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
continue
}
if waiter != nil {
waiter.RegisterItem(obj.GetName())
}
}
if groupResource == kuberesource.Pods && len(restic.GetPodSnapshotAnnotations(obj)) > 0 {
if ctx.resticRestorer == nil {
ctx.logger.Warn("No restic restorer, not restoring pod's volumes")
} else {
ctx.globalWaitGroup.GoErrorSlice(func() []error {
pod := new(v1.Pod)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil {
ctx.logger.WithError(err).Error("error converting unstructured pod")
return []error{err}
}
if waiter != nil {
if err := waiter.Wait(); err != nil {
addArkError(&errs, fmt.Errorf("error waiting for all %v resources to be created in namespace %s: %v", &groupResource, namespace, err))
if errs := ctx.resticRestorer.RestorePodVolumes(ctx.restore, pod, ctx.logger); errs != nil {
ctx.logger.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes")
return errs
}
return nil
})
}
}
}
return warnings, errs
}
// waitForReady consumes events from watchChan until the item with the given
// name satisfies the ready predicate, then returns it. Events other than
// Added/Modified, events for other items, and events whose object is not an
// *unstructured.Unstructured are skipped. If timeout is non-zero and elapses
// first, or if the watch channel is closed, an error is returned.
func waitForReady(
	watchChan <-chan watch.Event,
	name string,
	ready func(runtime.Unstructured) bool,
	timeout time.Duration,
	log logrus.FieldLogger,
) (*unstructured.Unstructured, error) {
	var timeoutChan <-chan time.Time
	if timeout != 0 {
		timeoutChan = time.After(timeout)
	} else {
		// a channel that never fires, so the select below waits
		// indefinitely when no timeout was requested.
		timeoutChan = make(chan time.Time)
	}

	for {
		select {
		case event, ok := <-watchChan:
			if !ok {
				// The watch was stopped. Without this check, receiving from a
				// closed channel returns zero-value events immediately and the
				// loop would spin forever.
				return nil, errors.New("watch channel closed before the item became ready")
			}

			if event.Type != watch.Added && event.Type != watch.Modified {
				continue
			}

			obj, ok := event.Object.(*unstructured.Unstructured)
			switch {
			case !ok:
				log.Errorf("Unexpected type %T", event.Object)
				continue
			case obj.GetName() != name:
				continue
			case !ready(obj):
				log.Debugf("Item %s is not ready yet", name)
				continue
			default:
				return obj, nil
			}
		case <-timeoutChan:
			return nil, errors.New("failed to observe item becoming ready within the timeout")
		}
	}
}
func (ctx *context) executePVAction(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
pvName := obj.GetName()
if pvName == "" {

View File

@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
api "github.com/heptio/ark/pkg/apis/ark/v1"
@ -547,6 +548,7 @@ func TestRestoreResourceForNamespace(t *testing.T) {
pvResource := metav1.APIResource{Name: "persistentvolumes", Namespaced: false}
dynamicFactory.On("ClientForGroupVersionResource", gv, pvResource, test.namespace).Return(resourceClient, nil)
resourceClient.On("Watch", metav1.ListOptions{}).Return(&fakeWatch{}, nil)
ctx := &context{
dynamicFactory: dynamicFactory,
@ -576,6 +578,14 @@ func TestRestoreResourceForNamespace(t *testing.T) {
}
}
// fakeWatch is a stub watch.Interface implementation whose result
// channel never delivers any events.
type fakeWatch struct{}

// Stop is a no-op.
func (*fakeWatch) Stop() {}

// ResultChan returns an open channel on which nothing is ever sent.
func (*fakeWatch) ResultChan() <-chan watch.Event {
	events := make(chan watch.Event)
	return events
}
func TestHasControllerOwner(t *testing.T) {
tests := []struct {
name string
@ -735,7 +745,7 @@ func TestIsCompleted(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
u := unstructuredOrDie(test.content)
u := arktest.UnstructuredOrDie(test.content)
backup, err := isCompleted(u, test.groupResource)
if assert.Equal(t, test.expectedErr, err != nil) {
@ -776,15 +786,15 @@ func TestObjectsAreEqual(t *testing.T) {
},
{
name: "Test JSON objects",
backupObj: unstructuredOrDie(`{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name":"default","namespace":"nginx-example", "labels": {"ark-restore": "test"}},"secrets":[{"name":"default-token-xhjjc"}]}`),
clusterObj: unstructuredOrDie(`{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"creationTimestamp":"2018-04-05T20:12:21Z","name":"default","namespace":"nginx-example","resourceVersion":"650","selfLink":"/api/v1/namespaces/nginx-example/serviceaccounts/default","uid":"a5a3d2a2-390d-11e8-9644-42010a960002"},"secrets":[{"name":"default-token-xhjjc"}]}`),
backupObj: arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name":"default","namespace":"nginx-example", "labels": {"ark-restore": "test"}},"secrets":[{"name":"default-token-xhjjc"}]}`),
clusterObj: arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"creationTimestamp":"2018-04-05T20:12:21Z","name":"default","namespace":"nginx-example","resourceVersion":"650","selfLink":"/api/v1/namespaces/nginx-example/serviceaccounts/default","uid":"a5a3d2a2-390d-11e8-9644-42010a960002"},"secrets":[{"name":"default-token-xhjjc"}]}`),
expectedErr: false,
expectedRes: true,
},
{
name: "Test ServiceAccount secrets mismatch",
backupObj: unstructuredOrDie(`{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name":"default","namespace":"nginx-example", "labels": {"ark-restore": "test"}},"secrets":[{"name":"default-token-abcde"}]}`),
clusterObj: unstructuredOrDie(`{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"creationTimestamp":"2018-04-05T20:12:21Z","name":"default","namespace":"nginx-example","resourceVersion":"650","selfLink":"/api/v1/namespaces/nginx-example/serviceaccounts/default","uid":"a5a3d2a2-390d-11e8-9644-42010a960002"},"secrets":[{"name":"default-token-xhjjc"}]}`),
backupObj: arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name":"default","namespace":"nginx-example", "labels": {"ark-restore": "test"}},"secrets":[{"name":"default-token-abcde"}]}`),
clusterObj: arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"creationTimestamp":"2018-04-05T20:12:21Z","name":"default","namespace":"nginx-example","resourceVersion":"650","selfLink":"/api/v1/namespaces/nginx-example/serviceaccounts/default","uid":"a5a3d2a2-390d-11e8-9644-42010a960002"},"secrets":[{"name":"default-token-xhjjc"}]}`),
expectedErr: false,
expectedRes: false,
},
@ -968,16 +978,6 @@ func TestIsPVReady(t *testing.T) {
}
}
// Copied from backup/backup_test.go for JSON testing.
// TODO: move this into util/test for re-use.
func unstructuredOrDie(data string) *unstructured.Unstructured {
o, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(data), nil, nil)
if err != nil {
panic(err)
}
return o.(*unstructured.Unstructured)
}
type testUnstructured struct {
*unstructured.Unstructured
}

View File

@ -21,10 +21,11 @@ import (
"github.com/pkg/errors"
"k8s.io/api/core/v1"
corev1api "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
)
// NamespaceAndName returns a string in the format <namespace>/<name>
@ -39,7 +40,7 @@ func NamespaceAndName(objMeta metav1.Object) string {
// a bool indicating whether or not the namespace was created, and an error if the create failed
// for a reason other than that the namespace already exists. Note that in the case where the
// namespace already exists, this function will return (false, nil).
func EnsureNamespaceExists(namespace *v1.Namespace, client corev1.NamespaceInterface) (bool, error) {
func EnsureNamespaceExists(namespace *corev1api.Namespace, client corev1client.NamespaceInterface) (bool, error) {
if _, err := client.Create(namespace); err == nil {
return true, nil
} else if apierrors.IsAlreadyExists(err) {
@ -48,3 +49,31 @@ func EnsureNamespaceExists(namespace *v1.Namespace, client corev1.NamespaceInter
return false, errors.Wrapf(err, "error creating namespace %s", namespace.Name)
}
}
// GetVolumeDirectory gets the name of the directory on the host, under /var/lib/kubelet/pods/<podUID>/volumes/,
// where the specified volume lives.
func GetVolumeDirectory(pod *corev1api.Pod, volumeName string, pvcLister corev1listers.PersistentVolumeClaimLister) (string, error) {
	// locate the named volume within the pod spec
	var volume *corev1api.Volume
	for i := range pod.Spec.Volumes {
		if pod.Spec.Volumes[i].Name == volumeName {
			volume = &pod.Spec.Volumes[i]
			break
		}
	}

	if volume == nil {
		return "", errors.New("volume not found in pod")
	}

	// for non-PVC volumes, the on-host directory is simply the volume's name
	pvcSource := volume.VolumeSource.PersistentVolumeClaim
	if pvcSource == nil {
		return volume.Name, nil
	}

	// for PVC-backed volumes, the directory is named after the bound PV
	pvc, err := pvcLister.PersistentVolumeClaims(pod.Namespace).Get(pvcSource.ClaimName)
	if err != nil {
		return "", errors.WithStack(err)
	}

	return pvc.Spec.VolumeName, nil
}

View File

@ -0,0 +1,27 @@
package logging
import (
"github.com/sirupsen/logrus"
)
// DefaultHooks returns a slice of the default
// logrus hooks to be used by a logger.
func DefaultHooks() []logrus.Hook {
	hooks := make([]logrus.Hook, 0, 2)
	hooks = append(hooks, &LogLocationHook{}, &ErrorLocationHook{})
	return hooks
}
// DefaultLogger returns a Logger with the default properties
// and hooks.
func DefaultLogger(level logrus.Level) *logrus.Logger {
	logger := logrus.New()
	logger.Level = level

	// register each of the standard hooks on the new logger
	hooks := DefaultHooks()
	for i := range hooks {
		logger.Hooks.Add(hooks[i])
	}

	return logger
}

View File

@ -0,0 +1,60 @@
package logging
import (
"sort"
"strings"
"github.com/sirupsen/logrus"
"github.com/heptio/ark/pkg/cmd/util/flag"
)
// sortedLogLevels holds the valid logrus level names, precomputed once
// at package init, for use as the allowed values of the enum flag.
var sortedLogLevels = sortLogLevels()

// LevelFlag is a command-line flag for setting the logrus
// log level.
type LevelFlag struct {
	*flag.Enum                // restricts the flag's value to the names in sortedLogLevels
	defaultValue logrus.Level // level returned by Parse if the stored value somehow fails to parse
}
// LogLevelFlag constructs a new log level flag.
func LogLevelFlag(defaultValue logrus.Level) *LevelFlag {
	levelFlag := &LevelFlag{defaultValue: defaultValue}
	levelFlag.Enum = flag.NewEnum(defaultValue.String(), sortedLogLevels...)
	return levelFlag
}
// Parse returns the flag's value as a logrus.Level.
func (f *LevelFlag) Parse() logrus.Level {
	parsed, err := logrus.ParseLevel(f.String())
	if err != nil {
		// This should theoretically never happen assuming the enum flag
		// is constructed correctly because the enum flag will not allow
		// an invalid value to be set.
		logrus.Errorf("log-level flag has invalid value %s", strings.ToUpper(f.String()))
		return f.defaultValue
	}

	return parsed
}
// sortLogLevels returns a string slice containing all of the valid logrus
// log levels (based on logrus.AllLevels), sorted in ascending order of severity.
func sortLogLevels() []string {
	// work on a copy so logrus.AllLevels itself is left untouched
	levels := append([]logrus.Level(nil), logrus.AllLevels...)

	// logrus.Panic has the lowest value, so the compare function uses ">"
	sort.Slice(levels, func(i, j int) bool { return levels[i] > levels[j] })

	names := make([]string, len(levels))
	for i, level := range levels {
		names[i] = level.String()
	}

	return names
}

View File

@ -0,0 +1,71 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sync
import "sync"
// An ErrorGroup waits for a collection of goroutines that return errors to finish.
// The main goroutine calls Go one or more times to execute a function that returns
// an error in a goroutine. Then it calls Wait to wait for all goroutines to finish
// and collect the results of each.
type ErrorGroup struct {
wg sync.WaitGroup
errChan chan error
}
// Go runs the specified function in a goroutine.
func (eg *ErrorGroup) Go(action func() error) {
if eg.errChan == nil {
eg.errChan = make(chan error)
}
eg.wg.Add(1)
go func() {
eg.errChan <- action()
eg.wg.Done()
}()
}
// GoErrorSlice runs a function that returns a slice of errors
// in a goroutine.
func (eg *ErrorGroup) GoErrorSlice(action func() []error) {
if eg.errChan == nil {
eg.errChan = make(chan error)
}
eg.wg.Add(1)
go func() {
for _, err := range action() {
eg.errChan <- err
}
eg.wg.Done()
}()
}
// Wait waits for all functions run via Go to finish,
// and returns all of their errors.
func (eg *ErrorGroup) Wait() []error {
var errs []error
go func() {
for {
errs = append(errs, <-eg.errChan)
}
}()
eg.wg.Wait()
return errs
}

21
pkg/util/test/helpers.go Normal file
View File

@ -0,0 +1,21 @@
package test
import (
"encoding/json"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// UnstructuredOrDie decodes the provided JSON document into an
// *unstructured.Unstructured, panicking if decoding fails. Intended
// for use in tests only.
func UnstructuredOrDie(data string) *unstructured.Unstructured {
	decoded, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(data), nil, nil)
	if err != nil {
		panic(err)
	}

	return decoded.(*unstructured.Unstructured)
}
// GetAsMap unmarshals the given JSON string into a generic
// map[string]interface{}, returning the map and any unmarshal error.
func GetAsMap(j string) (map[string]interface{}, error) {
	result := make(map[string]interface{})
	if err := json.Unmarshal([]byte(j), &result); err != nil {
		return result, err
	}

	return result, nil
}

View File

@ -0,0 +1,17 @@
package test
import (
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/mock"
"github.com/heptio/ark/pkg/apis/ark/v1"
)
// MockPodCommandExecutor is a testify-based mock of the pod command
// executor, for use in tests.
type MockPodCommandExecutor struct {
	mock.Mock
}

// ExecutePodCommand records the invocation with all of its arguments and
// returns whatever error the test configured for this call.
func (m *MockPodCommandExecutor) ExecutePodCommand(log logrus.FieldLogger, item map[string]interface{}, namespace, name, hookName string, hook *v1.ExecHook) error {
	ret := m.Called(log, item, namespace, name, hookName, hook)
	return ret.Error(0)
}

21
restic/complete-restore.sh Executable file
View File

@ -0,0 +1,21 @@
#!/bin/sh

# Moves restic-restored volume data into the pod's live volume directory,
# then writes a "done" marker file for the init container to detect.
#
# Usage: complete-restore.sh <restore-UID> <volume-name> <done-file-name>

set -o errexit
set -o nounset
set -o pipefail

# resolve the wildcards in the directories; quote the positional parameters
# so unusual characters don't undergo word splitting, and use pwd instead of
# echo $PWD, which would split/glob the unquoted expansion
RESTORE_DIR=$(cd /restores/"$1"/host_pods/*/volumes/*/"$2" && pwd)
VOLUME_DIR=$(cd /host_pods/"$1"/volumes/*/"$2" && pwd)

# the mv command fails when the source directory is empty,
# so check first.
if [ -n "$(ls -A "$RESTORE_DIR")" ]; then
    mv "$RESTORE_DIR"/* "$VOLUME_DIR"/
fi

# cleanup
rm -rf "$RESTORE_DIR"

# write the done file for the init container to pick up
mkdir -p "$VOLUME_DIR"/.ark && touch "$VOLUME_DIR"/.ark/"$3"