Rename Ark to Velero!!!

Signed-off-by: Nolan Brubaker <brubakern@vmware.com>

pull/1184/head
parent bbc6caf7fe
commit 43714caaec
@@ -14,11 +14,11 @@ about: Tell us about a problem you are experiencing

 **The output of the following commands will help us better understand what's going on**:
 (Pasting long output into a [GitHub gist](https://gist.github.com) or other pastebin is fine.)

-* `kubectl logs deployment/ark -n heptio-ark`
-* `ark backup describe <backupname>` or `kubectl get backup/<backupname> -n heptio-ark -o yaml`
-* `ark backup logs <backupname>`
-* `ark restore describe <restorename>` or `kubectl get restore/<restorename> -n heptio-ark -o yaml`
-* `ark restore logs <restorename>`
+* `kubectl logs deployment/velero -n velero`
+* `velero backup describe <backupname>` or `kubectl get backup/<backupname> -n velero -o yaml`
+* `velero backup logs <backupname>`
+* `velero restore describe <restorename>` or `kubectl get restore/<restorename> -n velero -o yaml`
+* `velero restore logs <restorename>`

 **Anything else you would like to add:**

@@ -27,7 +27,7 @@ about: Tell us about a problem you are experiencing

 **Environment:**

-- Ark version (use `ark version`):
+- Velero version (use `velero version`):
 - Kubernetes version (use `kubectl version`):
 - Kubernetes installer & version:
 - Cloud provider or hardware configuration:
@@ -14,7 +14,7 @@ about: Suggest an idea for this project

 **Environment:**

-- Ark version (use `ark version`):
+- Velero version (use `velero version`):
 - Kubernetes version (use `kubectl version`):
 - Kubernetes installer & version:
 - Cloud provider or hardware configuration:
@@ -27,7 +27,7 @@ _testmain.go

 debug

-/ark
+/velero
 .idea/

 .container-*
@@ -17,7 +17,7 @@ before:
   hooks:
   - ./hack/set-example-tags.sh
 builds:
-- main: ./cmd/ark/main.go
+- main: ./cmd/velero/main.go
   env:
   - CGO_ENABLED=0
   goos:

@@ -39,7 +39,7 @@ builds:
   - goos: windows
     goarch: arm64
   ldflags:
-  - -X "github.com/heptio/ark/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/heptio/ark/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/heptio/ark/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}"
+  - -X "github.com/heptio/velero/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/heptio/velero/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/heptio/velero/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}"
 archive:
   name_template: "{{ .ProjectName }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}"
   files:
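As an aside on the `ldflags` change: each `-X` flag overwrites a package-level string variable at link time. A hedged sketch of what the referenced `pkg/buildinfo` package presumably contains — the variable names come straight from the ldflags line above; the doc comments are assumptions:

```go
// Package buildinfo holds build-time metadata; the goreleaser ldflags above
// overwrite these variables at link time, e.g.
//   -X "github.com/heptio/velero/pkg/buildinfo.Version={{ .Tag }}"
package buildinfo

var (
	// Version is the release tag the binary was built from.
	Version string
	// GitSHA is the full git commit hash.
	GitSHA string
	// GitTreeState records whether the tree was "clean" or "dirty" at build time.
	GitTreeState string
)
```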
@@ -50,5 +50,5 @@ checksum:
 release:
   github:
     owner: heptio
-    name: ark
+    name: velero
   draft: true
@@ -1,4 +1,4 @@
-# Heptio Ark Community Code of Conduct
+# Velero Community Code of Conduct

 ## Contributor Code of Conduct
@@ -7,7 +7,7 @@ should be a new file created in the `changelogs/unreleased` folder. The file sho
 naming convention of `pr-username` and the contents of the file should be your text for the
 changelog.

-ark/changelogs/unreleased    <- folder
+velero/changelogs/unreleased <- folder
     000-username             <- file
@@ -16,8 +16,8 @@ FROM alpine:3.8

 MAINTAINER Steve Kriss <steve@heptio.com>

-ADD /bin/linux/amd64/ark-restic-restore-helper .
+ADD /bin/linux/amd64/velero-restic-restore-helper .

 USER nobody:nobody

-ENTRYPOINT [ "/ark-restic-restore-helper" ]
+ENTRYPOINT [ "/velero-restic-restore-helper" ]
@@ -24,8 +24,8 @@ RUN apk add --update --no-cache bzip2 && \
     mv restic_0.9.3_linux_amd64 /usr/bin/restic && \
     chmod +x /usr/bin/restic

-ADD /bin/linux/amd64/ark /ark
+ADD /bin/linux/amd64/velero /velero

 USER nobody:nobody

-ENTRYPOINT ["/ark"]
+ENTRYPOINT ["/velero"]
Makefile (12 changed lines)

@@ -15,10 +15,10 @@
 # limitations under the License.

 # The binary to build (just the basename).
-BIN ?= ark
+BIN ?= velero

 # This repo's root import path (under GOPATH).
-PKG := github.com/heptio/ark
+PKG := github.com/heptio/velero

 # Where to push the docker image.
 REGISTRY ?= gcr.io/heptio-images

@@ -63,7 +63,7 @@ IMAGE = $(REGISTRY)/$(BIN)
 # If you want to build AND push all containers, see the 'all-push' rule.
 all:
     @$(MAKE) build
-    @$(MAKE) build BIN=ark-restic-restore-helper
+    @$(MAKE) build BIN=velero-restic-restore-helper

 build-%:
     @$(MAKE) --no-print-directory ARCH=$* build

@@ -104,7 +104,7 @@ _output/bin/$(GOOS)/$(GOARCH)/$(BIN): build-dirs

 TTY := $(shell tty -s && echo "-t")

-BUILDER_IMAGE := ark-builder
+BUILDER_IMAGE := velero-builder

 # Example: make shell CMD="date > datefile"
 shell: build-dirs build-image

@@ -146,7 +146,7 @@ endif

 all-containers:
     $(MAKE) container
-    $(MAKE) container BIN=ark-restic-restore-helper
+    $(MAKE) container BIN=velero-restic-restore-helper
     $(MAKE) build-fsfreeze

 container: verify test .container-$(DOTFILE_IMAGE) container-name

@@ -160,7 +160,7 @@ container-name:

 all-push:
     $(MAKE) push
-    $(MAKE) push BIN=ark-restic-restore-helper
+    $(MAKE) push BIN=velero-restic-restore-helper
    $(MAKE) push-fsfreeze
README.md (44 changed lines)

@@ -1,35 +1,36 @@
-# Heptio Ark
+# Velero

-**Maintainers:** [Heptio][0]
+

 [![Build Status][1]][2] <a href="https://zenhub.com"><img src="https://raw.githubusercontent.com/ZenHubIO/support/master/zenhub-badge.png"></a>

+## Heptio Ark is now Velero!
+
+#### We're working on our first Velero release and instructions for migrating your Ark deployments to Velero. Stay tuned!

 ## Overview

-Ark gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. Ark lets you:
+Velero gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. Velero lets you:

 * Take backups of your cluster and restore in case of loss.
 * Copy cluster resources to other clusters.
 * Replicate your production environment for development and testing environments.

-Ark consists of:
+Velero consists of:

 * A server that runs on your cluster
 * A command-line client that runs locally

-You can run Ark in clusters on a cloud provider or on-premises. For detailed information, see [Compatible Storage Providers][99].
+You can run Velero in clusters on a cloud provider or on-premises. For detailed information, see [Compatible Storage Providers][99].

 ## Breaking changes

 Ark version 0.10.0 introduces a number of breaking changes. Before you upgrade to version 0.10.0, make sure to read [the documentation on upgrading][98].

 ## More information

-[The documentation][29] provides a getting started guide, plus information about building from source, architecture, extending Ark, and more.
+[The documentation][29] provides a getting started guide, plus information about building from source, architecture, extending Velero, and more.

 ## Troubleshooting

-If you encounter issues, review the [troubleshooting docs][30], [file an issue][4], or talk to us on the [#ark-dr channel][25] on the Kubernetes Slack server.
+If you encounter issues, review the [troubleshooting docs][30], [file an issue][4], or talk to us on the [#velero channel][25] on the Kubernetes Slack server.

 ## Contributing

@@ -51,29 +52,26 @@ Feedback and discussion are available on [the mailing list][24].

 See [the list of releases][6] to find out about feature changes.

 [0]: https://github.com/heptio
-[1]: https://travis-ci.org/heptio/ark.svg?branch=master
-[2]: https://travis-ci.org/heptio/ark
+[1]: https://travis-ci.org/heptio/velero.svg?branch=master
+[2]: https://travis-ci.org/heptio/velero

-[4]: https://github.com/heptio/ark/issues
-[5]: https://github.com/heptio/ark/blob/master/CONTRIBUTING.md
-[6]: https://github.com/heptio/ark/releases
+[4]: https://github.com/heptio/velero/issues
+[5]: https://github.com/heptio/velero/blob/master/CONTRIBUTING.md
+[6]: https://github.com/heptio/velero/releases

-[8]: https://github.com/heptio/ark/blob/master/CODE_OF_CONDUCT.md
+[8]: https://github.com/heptio/velero/blob/master/CODE_OF_CONDUCT.md
 [9]: https://kubernetes.io/docs/setup/
 [10]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-with-homebrew-on-macos
 [11]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#tabset-1
 [12]: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/README.md
 [14]: https://github.com/kubernetes/kubernetes

-[24]: http://j.hept.io/ark-list
-[25]: https://kubernetes.slack.com/messages/ark-dr
-[26]: https://github.com/heptio/ark/blob/master/docs/zenhub.md
+[24]: https://groups.google.com/forum/#!forum/projectvelero
+[25]: https://kubernetes.slack.com/messages/velero
+[26]: https://github.com/heptio/velero/blob/master/docs/zenhub.md

-[29]: https://heptio.github.io/ark/
+[29]: https://heptio.github.io/velero/
 [30]: /docs/troubleshooting.md

 [98]: /docs/upgrading-to-v0.10.md
 [99]: /docs/support-matrix.md
@@ -1,5 +1,5 @@
-# Ark Support
+# Velero Support

-Thanks for trying out Ark! We welcome all feedback, please consider joining our mailing list:
+Thanks for trying out Velero! We welcome all feedback, please consider joining our mailing list:

 - [Mailing List](http://j.hept.io/ark-list)
@@ -245,5 +245,5 @@ need to be updated for v0.10.
 - [eabef085](https://github.com/heptio/ark/commit/eabef085) Update generated Ark code based on the 1.11 k8s.io/code-generator script
 - [f5eac0b4](https://github.com/heptio/ark/commit/f5eac0b4) Update vendored library code for Kubernetes 1.11

-[1]: https://github.com/heptio/ark/blob/master/docs/upgrading-to-v0.10.md
+[1]: https://heptio.github.io/velero/v0.10.0/upgrading-to-v0.10
 [2]: locations.md
@@ -77,9 +77,9 @@
 here are the steps you can take to upgrade:

 1. Execute the steps from the **Credentials and configuration** section for your cloud:
-   * [AWS](https://heptio.github.io/ark/v0.8.0/aws-config#credentials-and-configuration)
-   * [Azure](https://heptio.github.io/ark/v0.8.0/azure-config#credentials-and-configuration)
-   * [GCP](https://heptio.github.io/ark/v0.8.0/gcp-config#credentials-and-configuration)
+   * [AWS](https://heptio.github.io/velero/v0.8.0/aws-config#credentials-and-configuration)
+   * [Azure](https://heptio.github.io/velero/v0.8.0/azure-config#credentials-and-configuration)
+   * [GCP](https://heptio.github.io/velero/v0.8.0/gcp-config#credentials-and-configuration)

    When you get to the secret creation step, if you don't have your `credentials-ark` file handy,
    you can copy the existing secret from your `heptio-ark-server` namespace into the `heptio-ark` namespace:

@@ -95,6 +95,6 @@
 ```

 3. Execute the commands from the **Start the server** section for your cloud:
-   * [AWS](https://heptio.github.io/ark/v0.8.0/aws-config#start-the-server)
-   * [Azure](https://heptio.github.io/ark/v0.8.0/azure-config#start-the-server)
-   * [GCP](https://heptio.github.io/ark/v0.8.0/gcp-config#start-the-server)
+   * [AWS](https://heptio.github.io/velero/v0.8.0/aws-config#start-the-server)
+   * [Azure](https://heptio.github.io/velero/v0.8.0/azure-config#start-the-server)
+   * [GCP](https://heptio.github.io/velero/v0.8.0/gcp-config#start-the-server)
@@ -0,0 +1 @@
+Renamed Heptio Ark to Velero. Changed internal imports, environment variables, and binary name.
@@ -45,7 +45,7 @@ func main() {
 }

 // done returns true if for each directory under /restores, a file exists
-// within the .ark/ subdirectory whose name is equal to os.Args[1], or
+// within the .velero/ subdirectory whose name is equal to os.Args[1], or
 // false otherwise
 func done() bool {
 	children, err := ioutil.ReadDir("/restores")

@@ -60,7 +60,7 @@ func done() bool {
 			continue
 		}

-		doneFile := filepath.Join("/restores", child.Name(), ".ark", os.Args[1])
+		doneFile := filepath.Join("/restores", child.Name(), ".velero", os.Args[1])

 		if _, err := os.Stat(doneFile); os.IsNotExist(err) {
 			fmt.Printf("Not found: %s\n", doneFile)
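Piecing the two hunks together, the restore helper's completion check presumably looks like the following self-contained sketch; the error handling, the non-directory skip, and the polling `main` are assumptions inferred from the visible fragments:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"time"
)

// done returns true if, for each directory under /restores, a file exists
// within the .velero/ subdirectory whose name is equal to os.Args[1], or
// false otherwise.
func done() bool {
	children, err := ioutil.ReadDir("/restores")
	if err != nil {
		fmt.Printf("error reading /restores: %s\n", err) // assumed handling
		return false
	}

	for _, child := range children {
		if !child.IsDir() {
			continue // assumed: the elided lines skip non-directories
		}

		doneFile := filepath.Join("/restores", child.Name(), ".velero", os.Args[1])

		if _, err := os.Stat(doneFile); os.IsNotExist(err) {
			fmt.Printf("Not found: %s\n", doneFile)
			return false
		}
	}

	return true
}

// main polls until every restore directory reports done, so the init
// container exits and the application container can start (assumed).
func main() {
	for !done() {
		time.Sleep(time.Second)
	}
}
```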
@@ -22,8 +22,8 @@ import (

 	"github.com/golang/glog"

-	"github.com/heptio/ark/pkg/cmd"
-	"github.com/heptio/ark/pkg/cmd/ark"
+	"github.com/heptio/velero/pkg/cmd"
+	"github.com/heptio/velero/pkg/cmd/velero"
 )

 func main() {

@@ -31,6 +31,6 @@ func main() {

 	baseName := filepath.Base(os.Args[0])

-	err := ark.NewCommand(baseName).Execute()
+	err := velero.NewCommand(baseName).Execute()
 	cmd.CheckError(err)
 }
@@ -1,10 +1,10 @@
-# How Ark Works
+# How Velero Works

-Each Ark operation -- on-demand backup, scheduled backup, restore -- is a custom resource, defined with a Kubernetes [Custom Resource Definition (CRD)][20] and stored in [etcd][22]. Ark also includes controllers that process the custom resources to perform backups, restores, and all related operations.
+Each Velero operation -- on-demand backup, scheduled backup, restore -- is a custom resource, defined with a Kubernetes [Custom Resource Definition (CRD)][20] and stored in [etcd][22]. Velero also includes controllers that process the custom resources to perform backups, restores, and all related operations.

 You can back up or restore all objects in your cluster, or you can filter objects by type, namespace, and/or label.

-Ark is ideal for the disaster recovery use case, as well as for snapshotting your application state, prior to performing system operations on your cluster (e.g. upgrades).
+Velero is ideal for the disaster recovery use case, as well as for snapshotting your application state, prior to performing system operations on your cluster (e.g. upgrades).

 ## On-demand backups

@@ -27,17 +27,17 @@ Scheduled backups are saved with the name `<SCHEDULE NAME>-<TIMESTAMP>`, where `

 ## Restores

-The **restore** operation allows you to restore all of the objects and persistent volumes from a previously created backup. You can also restore only a filtered subset of objects and persistent volumes. Ark supports multiple namespace remapping--for example, in a single restore, objects in namespace "abc" can be recreated under namespace "def", and the objects in namespace "123" under "456".
+The **restore** operation allows you to restore all of the objects and persistent volumes from a previously created backup. You can also restore only a filtered subset of objects and persistent volumes. Velero supports multiple namespace remapping--for example, in a single restore, objects in namespace "abc" can be recreated under namespace "def", and the objects in namespace "123" under "456".

-The default name of a restore is `<BACKUP NAME>-<TIMESTAMP>`, where `<TIMESTAMP>` is formatted as *YYYYMMDDhhmmss*. You can also specify a custom name. A restored object also includes a label with key `ark.heptio.com/restore-name` and value `<RESTORE NAME>`.
+The default name of a restore is `<BACKUP NAME>-<TIMESTAMP>`, where `<TIMESTAMP>` is formatted as *YYYYMMDDhhmmss*. You can also specify a custom name. A restored object also includes a label with key `velero.io/restore-name` and value `<RESTORE NAME>`.

-You can also run the Ark server in restore-only mode, which disables backup, schedule, and garbage collection functionality during disaster recovery.
+You can also run the Velero server in restore-only mode, which disables backup, schedule, and garbage collection functionality during disaster recovery.

 ## Backup workflow

-When you run `ark backup create test-backup`:
+When you run `velero backup create test-backup`:

-1. The Ark client makes a call to the Kubernetes API server to create a `Backup` object.
+1. The Velero client makes a call to the Kubernetes API server to create a `Backup` object.

 1. The `BackupController` notices the new `Backup` object and performs validation.
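Since a backup is just a custom resource in the `velero.io/v1` API group, step 1 of this workflow can be reproduced with any Kubernetes client. A hedged sketch using a recent client-go dynamic client (the object shown is a minimal `Backup`, not a complete spec):

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Backups are namespaced custom resources in the velero.io/v1 group,
	// living in the server's namespace.
	backups := client.Resource(schema.GroupVersionResource{
		Group: "velero.io", Version: "v1", Resource: "backups",
	}).Namespace("velero")

	backup := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "velero.io/v1",
		"kind":       "Backup",
		"metadata":   map[string]interface{}{"name": "test-backup"},
	}}

	// Equivalent to `velero backup create test-backup`: the BackupController
	// notices the new object and takes over from here.
	if _, err := backups.Create(context.TODO(), backup, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```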
@@ -45,19 +45,19 @@ When you run `ark backup create test-backup`:

 1. The `BackupController` makes a call to the object storage service -- for example, AWS S3 -- to upload the backup file.

-By default, `ark backup create` makes disk snapshots of any persistent volumes. You can adjust the snapshots by specifying additional flags. Run `ark backup create --help` to see available flags. Snapshots can be disabled with the option `--snapshot-volumes=false`.
+By default, `velero backup create` makes disk snapshots of any persistent volumes. You can adjust the snapshots by specifying additional flags. Run `velero backup create --help` to see available flags. Snapshots can be disabled with the option `--snapshot-volumes=false`.

 ![19]

 ## Backed-up API versions

-Ark backs up resources using the Kubernetes API server's *preferred version* for each group/resource. When restoring a resource, this same API group/version must exist in the target cluster in order for the restore to be successful.
+Velero backs up resources using the Kubernetes API server's *preferred version* for each group/resource. When restoring a resource, this same API group/version must exist in the target cluster in order for the restore to be successful.

 For example, if the cluster being backed up has a `gizmos` resource in the `things` API group, with group/versions `things/v1alpha1`, `things/v1beta1`, and `things/v1`, and the server's preferred group/version is `things/v1`, then all `gizmos` will be backed up from the `things/v1` API endpoint. When backups from this cluster are restored, the target cluster **must** have the `things/v1` endpoint in order for `gizmos` to be restored. Note that `things/v1` **does not** need to be the preferred version in the target cluster; it just needs to exist.
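The *preferred version* mentioned above is what the API server's discovery endpoint reports. A hedged sketch of querying it with client-go's discovery package, using the hypothetical `gizmos` resource from the example:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// One entry per API group, at the group/version the server prefers --
	// e.g. things/v1 rather than things/v1beta1. That preferred version is
	// the endpoint a backup would read gizmos from.
	lists, err := dc.ServerPreferredResources()
	if err != nil {
		panic(err)
	}
	for _, list := range lists {
		for _, r := range list.APIResources {
			if r.Name == "gizmos" {
				fmt.Println("gizmos would be backed up from", list.GroupVersion)
			}
		}
	}
}
```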
 ## Set a backup to expire

-When you create a backup, you can specify a TTL by adding the flag `--ttl <DURATION>`. If Ark sees that an existing backup resource is expired, it removes:
+When you create a backup, you can specify a TTL by adding the flag `--ttl <DURATION>`. If Velero sees that an existing backup resource is expired, it removes:

 * The backup resource
 * The backup file from cloud object storage
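The expiry test itself is simple arithmetic; a minimal sketch, assuming expiry is measured from the backup's creation time:

```go
package main

import (
	"fmt"
	"time"
)

// expired reports whether a backup created at the given time has outlived
// its TTL (for example one set via `--ttl 72h`).
func expired(creation time.Time, ttl time.Duration) bool {
	return time.Now().After(creation.Add(ttl))
}

func main() {
	created := time.Now().Add(-73 * time.Hour)
	fmt.Println(expired(created, 72*time.Hour)) // true: eligible for removal
}
```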
@@ -66,7 +66,7 @@ When you create a backup, you can specify a TTL by adding the flag `--ttl <DURAT

 ## Object storage sync

-Heptio Ark treats object storage as the source of truth. It continuously checks to see that the correct backup resources are always present. If there is a properly formatted backup file in the storage bucket, but no corresponding backup resource in the Kubernetes API, Ark synchronizes the information from object storage to Kubernetes.
+Velero treats object storage as the source of truth. It continuously checks to see that the correct backup resources are always present. If there is a properly formatted backup file in the storage bucket, but no corresponding backup resource in the Kubernetes API, Velero synchronizes the information from object storage to Kubernetes.

 This allows restore functionality to work in a cluster migration scenario, where the original backup objects do not exist in the new cluster.
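In outline, that sync is a reconcile loop over the bucket contents. A hedged sketch in which `listBackupsInBucket`, `backupExistsInCluster`, and `createBackupResource` are hypothetical stand-ins for the real object-store and API calls:

```go
package main

import "fmt"

// Hypothetical stand-ins for the real object-store and Kubernetes API calls.
func listBackupsInBucket() ([]string, error) { return []string{"test-backup"}, nil }
func backupExistsInCluster(name string) bool { return false }
func createBackupResource(name string) error {
	fmt.Println("recreating Backup resource:", name)
	return nil
}

// syncBackups makes the cluster match the bucket: any well-formed backup
// present in object storage but missing from the Kubernetes API gets its
// Backup resource recreated, which is what makes restores possible after
// a cluster migration.
func syncBackups() error {
	names, err := listBackupsInBucket()
	if err != nil {
		return err
	}
	for _, name := range names {
		if backupExistsInCluster(name) {
			continue
		}
		if err := createBackupResource(name); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := syncBackups(); err != nil {
		fmt.Println("sync failed:", err)
	}
}
```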
@@ -2,7 +2,7 @@

 ## API types

-Here we list the API types that have some functionality that you can only configure via json/yaml vs the `ark` cli
+Here we list the API types that have some functionality that you can only configure via json/yaml vs the `velero` cli
 (hooks)

 * [Backup][1]
@@ -2,12 +2,12 @@

 ## Use

-The `Backup` API type is used as a request for the Ark Server to perform a backup. Once created, the
-Ark Server immediately starts the backup process.
+The `Backup` API type is used as a request for the Velero Server to perform a backup. Once created, the
+Velero Server immediately starts the backup process.

 ## API GroupVersion

-Backup belongs to the API group version `ark.heptio.com/v1`.
+Backup belongs to the API group version `velero.io/v1`.

 ## Definition

@@ -15,15 +15,15 @@ Here is a sample `Backup` object with each of the fields documented:

 ```yaml
 # Standard Kubernetes API Version declaration. Required.
-apiVersion: ark.heptio.com/v1
+apiVersion: velero.io/v1
 # Standard Kubernetes Kind declaration. Required.
 kind: Backup
 # Standard Kubernetes metadata. Required.
 metadata:
   # Backup name. May be any valid Kubernetes object name. Required.
   name: a
-  # Backup namespace. Required. In version 0.7.0 and later, can be any string. Must be the namespace of the Ark server.
-  namespace: heptio-ark
+  # Backup namespace. Required. In version 0.7.0 and later, can be any string. Must be the namespace of the Velero server.
+  namespace: velero
 # Parameters about the backup. Required.
 spec:
   # Array of namespaces to include in the backup. If unspecified, all namespaces are included.

@@ -54,11 +54,11 @@ spec:
   # Individual objects must match this label selector to be included in the backup. Optional.
   labelSelector:
     matchLabels:
-      app: ark
+      app: velero
       component: server
   # Whether or not to snapshot volumes. This only applies to PersistentVolumes for Azure, GCE, and
-  # AWS. Valid values are true, false, and null/unset. If unset, Ark performs snapshots as long as
-  # a persistent volume provider is configured for Ark.
+  # AWS. Valid values are true, false, and null/unset. If unset, Velero performs snapshots as long as
+  # a persistent volume provider is configured for Velero.
   snapshotVolumes: null
   # Where to store the tarball and logs.
   storageLocation: aws-primary

@@ -92,7 +92,7 @@ spec:
   # This hook only applies to objects matching this label selector. Optional.
   labelSelector:
     matchLabels:
-      app: ark
+      app: velero
       component: server
   # An array of hooks to run before executing custom actions. Currently only "exec" hooks are supported.
   # DEPRECATED. Use pre instead.
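A side note on the `labelSelector` above: the `matchLabels` form is a plain AND over key/value equality. A hedged sketch of evaluating it with the apimachinery labels package (the candidate object's labels are invented for illustration):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The selector from the sample Backup: app=velero AND component=server.
	selector := labels.SelectorFromSet(labels.Set{
		"app":       "velero",
		"component": "server",
	})

	// Hypothetical labels on a candidate object.
	objLabels := labels.Set{"app": "velero", "component": "server", "tier": "control"}

	fmt.Println(selector.Matches(objLabels)) // true: extra labels are fine
}
```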
@@ -1,21 +1,21 @@
-# Ark Backup Storage Locations
+# Velero Backup Storage Locations

 ## Backup Storage Location

-Ark can store backups in a number of locations. These are represented in the cluster via the `BackupStorageLocation` CRD.
+Velero can store backups in a number of locations. These are represented in the cluster via the `BackupStorageLocation` CRD.

-Ark must have at least one `BackupStorageLocation`. By default, this is expected to be named `default`, however the name can be changed by specifying `--default-backup-storage-location` on `ark server`. Backups that do not explicitly specify a storage location will be saved to this `BackupStorageLocation`.
+Velero must have at least one `BackupStorageLocation`. By default, this is expected to be named `default`; however, the name can be changed by specifying `--default-backup-storage-location` on `velero server`. Backups that do not explicitly specify a storage location will be saved to this `BackupStorageLocation`.

 > *NOTE*: `BackupStorageLocation` takes the place of the `Config.backupStorageProvider` key as of v0.10.0

 A sample YAML `BackupStorageLocation` looks like the following:

 ```yaml
-apiVersion: ark.heptio.com/v1
+apiVersion: velero.io/v1
 kind: BackupStorageLocation
 metadata:
   name: default
-  namespace: heptio-ark
+  namespace: velero
 spec:
   provider: aws
   objectStorage:

@@ -32,7 +32,7 @@ The configurable parameters are as follows:

 | Key | Type | Default | Meaning |
 | --- | --- | --- | --- |
-| `provider` | String (Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the backups. |
+| `provider` | String (Velero natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the backups. |
 | `objectStorage` | ObjectStorageLocation | Required Field | Specification of the object storage for the given provider. |
 | `objectStorage/bucket` | String | Required Field | The storage bucket where backups are to be uploaded. |
 | `objectStorage/prefix` | String | Optional Field | The directory inside a storage bucket where backups are to be uploaded. |

@@ -48,10 +48,10 @@ The configurable parameters are as follows:
 | --- | --- | --- | --- |
 | `region` | string | Empty | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list.<br><br>Queried from the AWS S3 API if not provided. |
 | `s3ForcePathStyle` | bool | `false` | Set this to `true` if you are using a local storage service like Minio. |
-| `s3Url` | string | Required field for non-AWS-hosted storage| *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region`, and `bucket`. This field is primarily for local storage services like Minio.|
+| `s3Url` | string | Required field for non-AWS-hosted storage| *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Velero can already generate it from `region` and `bucket`. This field is primarily for local storage services like Minio.|
 | `publicUrl` | string | Empty | *Example*: https://minio.mycluster.com<br><br>If specified, use this instead of `s3Url` when generating download URLs (e.g., for logs). This field is primarily for local storage services like Minio.|
 | `kmsKeyId` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f" or "alias/`<KMS-Key-Alias-Name>`"<br><br>Specify an [AWS KMS key][10] id or alias to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights.|
-| `signatureVersion` | string | `"4"` | Version of the signature algorithm used to create signed URLs that are used by the ark cli to download backups or fetch logs. Possible versions are "1" and "4". Usually the default version 4 is correct, but some S3-compatible providers like Quobyte only support version 1.|
+| `signatureVersion` | string | `"4"` | Version of the signature algorithm used to create signed URLs that are used by the velero cli to download backups or fetch logs. Possible versions are "1" and "4". Usually the default version 4 is correct, but some S3-compatible providers like Quobyte only support version 1.|

 #### Azure
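To make `s3Url`, `s3ForcePathStyle`, and the signed URLs in this table concrete, here is a hedged sketch using the AWS SDK for Go (v1) against a Minio-style endpoint; the bucket name and object key are invented:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Roughly what s3Url + s3ForcePathStyle configure: a custom endpoint
	// addressed as http://minio:9000/<bucket>/<key> instead of the
	// virtual-hosted style <bucket>.s3.amazonaws.com.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),
		Endpoint:         aws.String("http://minio:9000"),
		S3ForcePathStyle: aws.Bool(true),
	}))

	svc := s3.New(sess)
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("velero"),                // invented bucket name
		Key:    aws.String("backups/b1/b1-logs.gz"), // invented object key
	})

	// A signed download URL of the kind the CLI uses to fetch logs.
	url, err := req.Presign(15 * time.Minute)
	fmt.Println(url, err)
}
```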
@@ -1,21 +1,21 @@
-# Ark Volume Snapshot Location
+# Velero Volume Snapshot Location

 ## Volume Snapshot Location

 A volume snapshot location is the location in which to store the volume snapshots created for a backup.

-Ark can be configured to take snapshots of volumes from multiple providers. Ark also allows you to configure multiple possible `VolumeSnapshotLocation` per provider, although you can only select one location per provider at backup time.
+Velero can be configured to take snapshots of volumes from multiple providers. Velero also allows you to configure multiple possible `VolumeSnapshotLocation` per provider, although you can only select one location per provider at backup time.

-Each VolumeSnapshotLocation describes a provider + location. These are represented in the cluster via the `VolumeSnapshotLocation` CRD. Ark must have at least one `VolumeSnapshotLocation` per cloud provider.
+Each VolumeSnapshotLocation describes a provider + location. These are represented in the cluster via the `VolumeSnapshotLocation` CRD. Velero must have at least one `VolumeSnapshotLocation` per cloud provider.

 A sample YAML `VolumeSnapshotLocation` looks like the following:

 ```yaml
-apiVersion: ark.heptio.com/v1
+apiVersion: velero.io/v1
 kind: VolumeSnapshotLocation
 metadata:
   name: aws-default
-  namespace: heptio-ark
+  namespace: velero
 spec:
   provider: aws
   config:

@@ -30,7 +30,7 @@ The configurable parameters are as follows:

 | Key | Type | Default | Meaning |
 | --- | --- | --- | --- |
-| `provider` | String (Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the volume. |
+| `provider` | String (Velero natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the volume. |
 | `config` | See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs or your provider's documentation. |

 #### AWS
@@ -1,9 +1,9 @@
-# Run Ark on AWS
+# Run Velero on AWS

-To set up Ark on AWS, you:
+To set up Velero on AWS, you:

 * Create your S3 bucket
-* Create an AWS IAM user for Ark
+* Create an AWS IAM user for Velero
 * Configure the server
 * Create a Secret for your credentials
@@ -11,7 +11,7 @@ If you do not have the `aws` CLI locally installed, follow the [user guide][5] t

 ## Create S3 bucket

-Heptio Ark requires an object storage bucket to store backups in, preferrably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create an S3 bucket, replacing placeholders appropriately:
+Velero requires an object storage bucket to store backups in, preferably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create an S3 bucket, replacing placeholders appropriately:

 ```bash
 aws s3api create-bucket \
@@ -34,16 +34,16 @@ For more information, see [the AWS documentation on IAM users][14].
 1. Create the IAM user:

    ```bash
-   aws iam create-user --user-name heptio-ark
+   aws iam create-user --user-name velero
    ```

-   > If you'll be using Ark to backup multiple clusters with multiple S3 buckets, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`.
+   > If you'll be using Velero to back up multiple clusters with multiple S3 buckets, it may be desirable to create a unique username per cluster rather than the default `velero`.

-2. Attach policies to give `heptio-ark` the necessary permissions:
+2. Attach policies to give `velero` the necessary permissions:

    ```bash
    BUCKET=<YOUR_BUCKET>
-   cat > heptio-ark-policy.json <<EOF
+   cat > velero-policy.json <<EOF
    {
        "Version": "2012-10-17",
        "Statement": [

@@ -86,15 +86,15 @@ For more information, see [the AWS documentation on IAM users][14].
    EOF

    aws iam put-user-policy \
-     --user-name heptio-ark \
-     --policy-name heptio-ark \
-     --policy-document file://heptio-ark-policy.json
+     --user-name velero \
+     --policy-name velero \
+     --policy-document file://velero-policy.json
    ```

 3. Create an access key for the user:

    ```bash
-   aws iam create-access-key --user-name heptio-ark
+   aws iam create-access-key --user-name velero
    ```

    The result should look like:

@@ -102,7 +102,7 @@ For more information, see [the AWS documentation on IAM users][14].
    ```json
    {
        "AccessKey": {
-            "UserName": "heptio-ark",
+            "UserName": "velero",
             "Status": "Active",
             "CreateDate": "2017-07-31T22:24:41.576Z",
             "SecretAccessKey": <AWS_SECRET_ACCESS_KEY>,

@@ -111,7 +111,7 @@ For more information, see [the AWS documentation on IAM users][14].
        }
    }
    ```

-4. Create an Ark-specific credentials file (`credentials-ark`) in your local directory:
+4. Create a Velero-specific credentials file (`credentials-velero`) in your local directory:

    ```
    [default]
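The `credentials-velero` file created above is a standard AWS shared-credentials file, so any AWS SDK can load it. A hedged Go sketch using the AWS SDK for Go (v1); the `default` profile name matches the snippet, and the relative path assumes you run from the same directory:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Load the [default] profile from the credentials-velero file --
	// the same file the cloud-credentials Secret is created from.
	creds := credentials.NewSharedCredentials("credentials-velero", "default")

	v, err := creds.Get()
	if err != nil {
		fmt.Println("could not read credentials:", err)
		return
	}
	fmt.Println("loaded access key ID:", v.AccessKeyID)
}
```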
@@ -123,7 +123,7 @@ For more information, see [the AWS documentation on IAM users][14].

 ## Credentials and configuration

-In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].
+In the Velero directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].

 ```bash
 kubectl apply -f config/common/00-prereqs.yaml

@@ -133,17 +133,17 @@ Create a Secret. In the directory of the credentials file you just created, run:

 ```bash
 kubectl create secret generic cloud-credentials \
-  --namespace <ARK_NAMESPACE> \
-  --from-file cloud=credentials-ark
+  --namespace <VELERO_NAMESPACE> \
+  --from-file cloud=credentials-velero
 ```

 Specify the following values in the example files:

-* In `config/aws/05-ark-backupstoragelocation.yaml`:
+* In `config/aws/05-backupstoragelocation.yaml`:

   * Replace `<YOUR_BUCKET>` and `<YOUR_REGION>` (for S3 backup storage, region is optional and will be queried from the AWS S3 API if not provided). See the [BackupStorageLocation definition][21] for details.

-* In `config/aws/06-ark-volumesnapshotlocation.yaml`:
+* In `config/aws/06-volumesnapshotlocation.yaml`:

   * Replace `<YOUR_REGION>`. See the [VolumeSnapshotLocation definition][6] for details.

@@ -157,7 +157,7 @@ Specify the following values in the example files:

 * (Optional) If you have multiple clusters and you want to support migration of resources between them, in file `config/aws/10-deployment.yaml`:

-  * Uncomment the environment variable `AWS_CLUSTER_NAME` and replace `<YOUR_CLUSTER_NAME>` with the current cluster's name. When restoring backup, it will make Ark (and cluster it's running on) claim ownership of AWS volumes created from snapshots taken on different cluster.
+  * Uncomment the environment variable `AWS_CLUSTER_NAME` and replace `<YOUR_CLUSTER_NAME>` with the current cluster's name. When restoring a backup, this makes Velero (and the cluster it's running on) claim ownership of AWS volumes created from snapshots taken on a different cluster.
    The best way to get the current cluster's name is to either check it with the deployment tool you used or to read it directly from the EC2 instances' tags.

    The following listing shows how to get the cluster's nodes EC2 Tags. First, get the nodes external IDs (EC2 IDs):
@@ -182,11 +182,11 @@ Specify the following values in the example files:

 ## Start the server

-In the root of your Ark directory, run:
+In the root of your Velero directory, run:

 ```bash
-kubectl apply -f config/aws/05-ark-backupstoragelocation.yaml
-kubectl apply -f config/aws/06-ark-volumesnapshotlocation.yaml
+kubectl apply -f config/aws/05-backupstoragelocation.yaml
+kubectl apply -f config/aws/06-volumesnapshotlocation.yaml
 kubectl apply -f config/aws/10-deployment.yaml
 ```
@@ -196,12 +196,12 @@ In the root of your Ark directory, run:

 > This path assumes you have `kube2iam` already running in your Kubernetes cluster. If that is not the case, please install it first, following the docs here: [https://github.com/jtblin/kube2iam](https://github.com/jtblin/kube2iam)

-It can be set up for Ark by creating a role that will have required permissions, and later by adding the permissions annotation on the ark deployment to define which role it should use internally.
+It can be set up for Velero by creating a role that will have the required permissions, and later by adding the permissions annotation on the velero deployment to define which role it should use internally.

 1. Create a Trust Policy document to allow the role being used for EC2 management & assume kube2iam role:

    ```bash
-   cat > heptio-ark-trust-policy.json <<EOF
+   cat > velero-trust-policy.json <<EOF
    {
        "Version": "2012-10-17",
        "Statement": [

@@ -227,14 +227,14 @@ It can be set up for Ark by creating a role that will have required permissions,

 2. Create the IAM role:

    ```bash
-   aws iam create-role --role-name heptio-ark --assume-role-policy-document file://./heptio-ark-trust-policy.json
+   aws iam create-role --role-name velero --assume-role-policy-document file://./velero-trust-policy.json
    ```

-3. Attach policies to give `heptio-ark` the necessary permissions:
+3. Attach policies to give `velero` the necessary permissions:

    ```bash
    BUCKET=<YOUR_BUCKET>
-   cat > heptio-ark-policy.json <<EOF
+   cat > velero-policy.json <<EOF
    {
        "Version": "2012-10-17",
        "Statement": [

@@ -277,31 +277,31 @@ It can be set up for Ark by creating a role that will have required permissions,
    EOF

    aws iam put-role-policy \
-     --role-name heptio-ark \
-     --policy-name heptio-ark-policy \
-     --policy-document file://./heptio-ark-policy.json
+     --role-name velero \
+     --policy-name velero-policy \
+     --policy-document file://./velero-policy.json
    ```

-4. Update `AWS_ACCOUNT_ID` & `HEPTIO_ARK_ROLE_NAME` in the file `config/aws/10-deployment-kube2iam.yaml`:
+4. Update `AWS_ACCOUNT_ID` & `VELERO_ROLE_NAME` in the file `config/aws/10-deployment-kube2iam.yaml`:

    ```
    ---
    apiVersion: apps/v1beta1
    kind: Deployment
    metadata:
-     namespace: heptio-ark
-     name: ark
+     namespace: velero
+     name: velero
    spec:
      replicas: 1
      template:
        metadata:
          labels:
-           component: ark
+           component: velero
          annotations:
-           iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<HEPTIO_ARK_ROLE_NAME>
+           iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<VELERO_ROLE_NAME>
    ...
    ```

-5. Run Ark deployment using the file `config/aws/10-deployment-kube2iam.yaml`.
+5. Run the Velero deployment using the file `config/aws/10-deployment-kube2iam.yaml`.

 [0]: namespace.md
 [5]: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html
@@ -1,9 +1,9 @@
-# Run Ark on Azure
+# Run Velero on Azure

-To configure Ark on Azure, you:
+To configure Velero on Azure, you:

 * Create your Azure storage account and blob container
-* Create Azure service principal for Ark
+* Create Azure service principal for Velero
 * Configure the server
 * Create a Secret for your credentials
@@ -22,11 +22,11 @@ consider using Premium Managed Disks, which are SSD backed.

 ## Create Azure storage account and blob container

-Heptio Ark requires a storage account and blob container in which to store backups.
+Velero requires a storage account and blob container in which to store backups.

 The storage account can be created in the same Resource Group as your Kubernetes cluster or
 separated into its own Resource Group. The example below shows the storage account created in a
-separate `Ark_Backups` Resource Group.
+separate `Velero_Backups` Resource Group.

 The storage account needs to be created with a globally unique id since this is used for dns. In
 the sample script below, we're generating a random name using `uuidgen`, but you can come up with
@@ -36,11 +36,11 @@ configured to only allow access via https.

 ```bash
 # Create a resource group for the backups storage account. Change the location as needed.
-AZURE_BACKUP_RESOURCE_GROUP=Ark_Backups
+AZURE_BACKUP_RESOURCE_GROUP=Velero_Backups
 az group create -n $AZURE_BACKUP_RESOURCE_GROUP --location WestUS

 # Create the storage account
-AZURE_STORAGE_ACCOUNT_ID="ark$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')"
+AZURE_STORAGE_ACCOUNT_ID="velero$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')"
 az storage account create \
     --name $AZURE_STORAGE_ACCOUNT_ID \
     --resource-group $AZURE_BACKUP_RESOURCE_GROUP \

@@ -51,10 +51,10 @@ az storage account create \
     --access-tier Hot
 ```

-Create the blob container named `ark`. Feel free to use a different name, preferably unique to a single Kubernetes cluster. See the [FAQ][20] for more details.
+Create the blob container named `velero`. Feel free to use a different name, preferably unique to a single Kubernetes cluster. See the [FAQ][20] for more details.

 ```bash
-az storage container create -n ark --public-access off --account-name $AZURE_STORAGE_ACCOUNT_ID
+az storage container create -n velero --public-access off --account-name $AZURE_STORAGE_ACCOUNT_ID
 ```

 ## Get resource group for persistent volume snapshots
@@ -78,7 +78,7 @@ az storage container create -n ark --public-access off --account-name $AZURE_STO

 ## Create service principal

-To integrate Ark with Azure, you must create an Ark-specific [service principal][17].
+To integrate Velero with Azure, you must create a Velero-specific [service principal][17].

 1. Obtain your Azure Account Subscription ID and Tenant ID:

@@ -89,23 +89,23 @@ To integrate Ark with Azure, you must create an Ark-specific [service principal]

 1. Create a service principal with `Contributor` role. This will have subscription-wide access, so protect this credential. You can specify a password or let the `az ad sp create-for-rbac` command create one for you.

-   > If you'll be using Ark to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`.
+   > If you'll be using Velero to back up multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `velero`.

 ```bash
 # Create service principal and specify your own password
 AZURE_CLIENT_SECRET=super_secret_and_high_entropy_password_replace_me_with_your_own
-az ad sp create-for-rbac --name "heptio-ark" --role "Contributor" --password $AZURE_CLIENT_SECRET
+az ad sp create-for-rbac --name "velero" --role "Contributor" --password $AZURE_CLIENT_SECRET

 # Or create service principal and let the CLI generate a password for you. Make sure to capture the password.
-AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "heptio-ark" --role "Contributor" --query 'password' -o tsv`
+AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "velero" --role "Contributor" --query 'password' -o tsv`

 # After creating the service principal, obtain the client id
-AZURE_CLIENT_ID=`az ad sp list --display-name "heptio-ark" --query '[0].appId' -o tsv`
+AZURE_CLIENT_ID=`az ad sp list --display-name "velero" --query '[0].appId' -o tsv`
 ```

 ## Credentials and configuration

-In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML file to specify the namespace. See [Run in custom namespace][0].
+In the Velero directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML file to specify the namespace. See [Run in custom namespace][0].

 ```bash
 kubectl apply -f config/common/00-prereqs.yaml
@@ -115,7 +115,7 @@ Now you need to create a Secret that contains all the environment variables you

 ```bash
 kubectl create secret generic cloud-credentials \
-  --namespace <ARK_NAMESPACE> \
+  --namespace <VELERO_NAMESPACE> \
   --from-literal AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \
   --from-literal AZURE_TENANT_ID=${AZURE_TENANT_ID} \
   --from-literal AZURE_CLIENT_ID=${AZURE_CLIENT_ID} \
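Those `--from-literal` keys surface inside the server pod as environment variables once the Secret is mapped into the deployment. A hedged sketch of the Go-side check (`AZURE_CLIENT_SECRET` is assumed to be among the lines omitted from the hunk):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// The keys created with --from-literal above, as the server would see
	// them once the Secret is projected into its environment.
	for _, key := range []string{
		"AZURE_SUBSCRIPTION_ID",
		"AZURE_TENANT_ID",
		"AZURE_CLIENT_ID",
		"AZURE_CLIENT_SECRET", // assumed: truncated from the snippet above
	} {
		if os.Getenv(key) == "" {
			fmt.Println("missing required environment variable:", key)
		}
	}
}
```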
@@ -125,21 +125,21 @@ kubectl create secret generic cloud-credentials \

 Now that you have your Azure credentials stored in a Secret, you need to replace some placeholder values in the template files. Specifically, you need to change the following:

-* In file `config/azure/05-ark-backupstoragelocation.yaml`:
+* In file `config/azure/05-backupstoragelocation.yaml`:

   * Replace `<YOUR_BLOB_CONTAINER>`, `<YOUR_STORAGE_RESOURCE_GROUP>`, and `<YOUR_STORAGE_ACCOUNT>`. See the [BackupStorageLocation definition][21] for details.

-* In file `config/azure/06-ark-volumesnapshotlocation.yaml`:
+* In file `config/azure/06-volumesnapshotlocation.yaml`:

   * Replace `<YOUR_TIMEOUT>`. See the [VolumeSnapshotLocation definition][8] for details.

-* (Optional, use only if you need to specify multiple volume snapshot locations) In `config/azure/00-ark-deployment.yaml`:
+* (Optional, use only if you need to specify multiple volume snapshot locations) In `config/azure/00-deployment.yaml`:

   * Uncomment the `--default-volume-snapshot-locations` and replace provider locations with the values for your environment.

 ## Start the server

-In the root of your Ark directory, run:
+In the root of your Velero directory, run:

 ```bash
 kubectl apply -f config/azure/
@@ -9,7 +9,7 @@

 ## Prerequisites

-* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `ark backup delete`.
+* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `velero backup delete`.
 * A DNS server on the cluster
 * `kubectl` installed
 * [Go][5] installed (minimum version 1.8)

@@ -19,7 +19,7 @@

 ```bash
 mkdir $HOME/go
 export GOPATH=$HOME/go
-go get github.com/heptio/ark
+go get github.com/heptio/velero
 ```

 Where `go` is your [import path][4] for Go.
@@ -29,11 +29,11 @@ For Go development, it is recommended to add the Go import path (`$HOME/go` in t

 ## Build

-You can build your Ark image locally on the machine where you run your cluster, or you can push it to a private registry. This section covers both workflows.
+You can build your Velero image locally on the machine where you run your cluster, or you can push it to a private registry. This section covers both workflows.

-Set the `$REGISTRY` environment variable (used in the `Makefile`) to push the Heptio Ark images to your own registry. This allows any node in your cluster to pull your locally built image.
+Set the `$REGISTRY` environment variable (used in the `Makefile`) to push the Velero images to your own registry. This allows any node in your cluster to pull your locally built image.

-In the Ark root directory, to build your container with the tag `$REGISTRY/ark:$VERSION`, run:
+In the Velero root directory, to build your container with the tag `$REGISTRY/velero:$VERSION`, run:

 ```
 make container
@@ -63,12 +63,12 @@ Run [generate-proto.sh][13] to regenerate files if you make the following change

 ### Cross compiling

-By default, `make build` builds an `ark` binary for `linux-amd64`.
+By default, `make build` builds a `velero` binary for `linux-amd64`.
 To build for another platform, run `make build-<GOOS>-<GOARCH>`.
 For example, to build for the Mac, run `make build-darwin-amd64`.
-All binaries are placed in `_output/bin/<GOOS>/<GOARCH>` -- for example, `_output/bin/darwin/amd64/ark`.
+All binaries are placed in `_output/bin/<GOOS>/<GOARCH>` -- for example, `_output/bin/darwin/amd64/velero`.

-Ark's `Makefile` has a convenience target, `all-build`, that builds the following platforms:
+Velero's `Makefile` has a convenience target, `all-build`, that builds the following platforms:

 * linux-amd64
 * linux-arm
@@ -85,7 +85,7 @@ files (clientset, listers, shared informers, docs) are up to date.

 ### Prerequisites

-When running Heptio Ark, you will need to account for the following (all of which are handled in the [`/examples`][6] manifests):
+When running Velero, you will need to account for the following (all of which are handled in the [`/examples`][6] manifests):

 * Appropriate RBAC permissions in the cluster
   * Read access for all data from the source cluster and namespaces

@@ -93,8 +93,8 @@ When running Heptio Ark, you will need to account for the following (all of whic
 * Cloud provider credentials
   * Read/write access to volumes
   * Read/write access to object storage for backup data
-* A [BackupStorageLocation][20] object definition for the Ark server
-* (Optional) A [VolumeSnapshotLocation][21] object definition for the Ark server, to take PV snapshots
+* A [BackupStorageLocation][20] object definition for the Velero server
+* (Optional) A [VolumeSnapshotLocation][21] object definition for the Velero server, to take PV snapshots

 ### Create a cluster
@@ -104,9 +104,9 @@ To provision a cluster on AWS using Amazon’s official CloudFormation templates

 * eksctl - [a CLI for Amazon EKS][18]

-### Option 1: Run your Ark server locally
+### Option 1: Run your Velero server locally

-Running the Ark server locally can speed up iterative development. This eliminates the need to rebuild the Ark server
+Running the Velero server locally can speed up iterative development. This eliminates the need to rebuild the Velero server
 image and redeploy it to the cluster with each change.

 #### 1. Set environment variables
@ -139,64 +139,64 @@ You may create resources on a cluster using our [example configurations][19].
|
|||
|
||||
##### Example
|
||||
|
||||
Here is how to setup using an existing cluster in AWS: At the root of the Ark repo:
|
||||
Here is how to setup using an existing cluster in AWS: At the root of the Velero repo:
|
||||
|
||||
- Edit `examples/aws/05-ark-backupstoragelocation.yaml` to point to your AWS S3 bucket and region. Note: you can run `aws s3api list-buckets` to get the name of all your buckets.
|
||||
- Edit `examples/aws/05-backupstoragelocation.yaml` to point to your AWS S3 bucket and region. Note: you can run `aws s3api list-buckets` to get the name of all your buckets.
|
||||
|
||||
- (Optional) Edit `examples/aws/06-ark-volumesnapshotlocation.yaml` to point to your AWS region.
|
||||
- (Optional) Edit `examples/aws/06-volumesnapshotlocation.yaml` to point to your AWS region.
|
||||
|
||||
Then run the commands below.
|
||||
|
||||
`00-prereqs.yaml` contains all our CustomResourceDefinitions (CRDs) that allow us to perform CRUD operations on backups, restores, schedules, etc. it also contains the `heptio-ark` namespace, the `ark` ServiceAccount, and a cluster role binding to grant the `ark` ServiceAccount the cluster-admin role:
|
||||
`00-prereqs.yaml` contains all our CustomResourceDefinitions (CRDs) that allow us to perform CRUD operations on backups, restores, schedules, etc. it also contains the `velero` namespace, the `velero` ServiceAccount, and a cluster role binding to grant the `velero` ServiceAccount the cluster-admin role:
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
`10-deployment.yaml` is a sample Ark config resource for AWS:
|
||||
`10-deployment.yaml` is a sample Velero config resource for AWS:
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/aws/10-deployment.yaml
|
||||
```
|
||||
|
||||
And `05-ark-backupstoragelocation.yaml` specifies the location of your backup storage, together with the optional `06-ark-volumesnapshotlocation.yaml`:
|
||||
And `05-backupstoragelocation.yaml` specifies the location of your backup storage, together with the optional `06-volumesnapshotlocation.yaml`:
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/aws/05-ark-backupstoragelocation.yaml
|
||||
kubectl apply -f examples/aws/05-backupstoragelocation.yaml
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/aws/05-ark-backupstoragelocation.yaml examples/aws/06-ark-volumesnapshotlocation.yaml
|
||||
kubectl apply -f examples/aws/05-backupstoragelocation.yaml examples/aws/06-volumesnapshotlocation.yaml
|
||||
```
|
||||
|
||||
### 3. Start the Ark server
|
||||
### 3. Start the Velero server
|
||||
|
||||
* Make sure `ark` is in your `PATH` or specify the full path.
|
||||
* Make sure `velero` is in your `PATH` or specify the full path.
|
||||
|
||||
* Set variable for Ark as needed. The variables below can be exported as environment variables or passed as CLI cmd flags:
|
||||
* `--kubeconfig`: set the path to the kubeconfig file the Ark server uses to talk to the Kubernetes apiserver
|
||||
* `--namespace`: the set namespace where the Ark server should look for backups, schedules, restores
|
||||
* `--log-level`: set the Ark server's log level
|
||||
* `--plugin-dir`: set the directory where the Ark server looks for plugins
|
||||
* Set variable for Velero as needed. The variables below can be exported as environment variables or passed as CLI cmd flags:
|
||||
* `--kubeconfig`: set the path to the kubeconfig file the Velero server uses to talk to the Kubernetes apiserver
|
||||
* `--namespace`: the set namespace where the Velero server should look for backups, schedules, restores
|
||||
* `--log-level`: set the Velero server's log level
|
||||
* `--plugin-dir`: set the directory where the Velero server looks for plugins
|
||||
* `--metrics-address`: set the bind address and port where Prometheus metrics are exposed
|
||||
|
||||
* Start the server: `ark server`
|
||||
* Start the server: `velero server`
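
For example, a minimal invocation combining these flags might look like the following (paths and values are illustrative):

```bash
velero server \
  --kubeconfig ~/.kube/config \
  --namespace velero \
  --log-level debug \
  --metrics-address ":8085"
```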
|
||||
|
||||
### Option 2: Run your Ark server in a deployment
|
||||
### Option 2: Run your Velero server in a deployment
|
||||
|
||||
1. Install Ark using a deployment:
|
||||
1. Install Velero using a deployment:
|
||||
|
||||
We have examples of deployments for different cloud providers in `examples/<cloud-provider>/10-deployment.yaml`.
|
||||
|
||||
2. Replace the deployment's default Ark image with the image that you built. Run:
|
||||
2. Replace the deployment's default Velero image with the image that you built. Run:
|
||||
|
||||
```
|
||||
kubectl --namespace=heptio-ark set image deployment/ark ark=$REGISTRY/ark:$VERSION
|
||||
kubectl --namespace=velero set image deployment/velero velero=$REGISTRY/velero:$VERSION
|
||||
```
|
||||
|
||||
where `$REGISTRY` and `$VERSION` are the values that you built Ark with.
|
||||
where `$REGISTRY` and `$VERSION` are the values that you built Velero with.
|
||||
|
||||
## 5. Vendoring dependencies
|
||||
|
||||
|
@ -208,13 +208,13 @@ If you need to add or update the vendored dependencies, see [Vendoring dependenc
|
|||
[3]: #build
|
||||
[4]: https://blog.golang.org/organizing-go-code
|
||||
[5]: https://golang.org/doc/install
|
||||
[6]: https://github.com/heptio/ark/tree/master/examples
|
||||
[6]: https://github.com/heptio/velero/tree/master/examples
|
||||
[7]: #run
|
||||
[8]: config-definition.md
|
||||
[10]: #vendoring-dependencies
|
||||
[11]: vendoring-dependencies.md
|
||||
[12]: #test
|
||||
[13]: https://github.com/heptio/ark/blob/master/hack/generate-proto.sh
|
||||
[13]: https://github.com/heptio/velero/blob/master/hack/generate-proto.sh
|
||||
[14]: https://grpc.io/docs/quickstart/go.html#install-protocol-buffers-v3
|
||||
[15]: https://docs.aws.amazon.com/cli/latest/topic/config-vars.html#the-shared-credentials-file
|
||||
[16]: https://cloud.google.com/docs/authentication/getting-started#setting_the_environment_variable
|
||||
|
|
|
@ -3,17 +3,17 @@
|
|||
## General
|
||||
|
||||
### `invalid configuration: no configuration has been provided`
|
||||
This typically means that no `kubeconfig` file can be found for the Ark client to use. Ark looks for a kubeconfig in the
|
||||
This typically means that no `kubeconfig` file can be found for the Velero client to use. Velero looks for a kubeconfig in the
|
||||
following locations:
|
||||
* the path specified by the `--kubeconfig` flag, if any
|
||||
* the path specified by the `$KUBECONFIG` environment variable, if any
|
||||
* `~/.kube/config`
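
If the error persists, a quick sketch for pointing the client at an explicit kubeconfig (the path is illustrative):

```
# either pass the flag on each invocation...
velero backup get --kubeconfig /path/to/admin.kubeconfig

# ...or export it once for the shell session
export KUBECONFIG=/path/to/admin.kubeconfig
velero backup get
```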
|
||||
|
||||
### Backups or restores stuck in `New` phase
|
||||
This means that the Ark controllers are not processing the backups/restores, which usually happens because the Ark server is not running. Check the pod description and logs for errors:
|
||||
This means that the Velero controllers are not processing the backups/restores, which usually happens because the Velero server is not running. Check the pod description and logs for errors:
|
||||
```
|
||||
kubectl -n heptio-ark describe pods
|
||||
kubectl -n heptio-ark logs deployment/ark
|
||||
kubectl -n velero describe pods
|
||||
kubectl -n velero logs deployment/velero
|
||||
```
|
||||
|
||||
|
||||
|
@ -22,19 +22,19 @@ kubectl -n heptio-ark logs deployment/ark
|
|||
### `NoCredentialProviders: no valid providers in chain`
|
||||
|
||||
#### Using credentials
|
||||
This means that the secret containing the AWS IAM user credentials for Ark has not been created/mounted properly
|
||||
into the Ark server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Ark server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-ark` file
|
||||
* The `credentials-ark` file is formatted properly and has the correct values:
|
||||
This means that the secret containing the AWS IAM user credentials for Velero has not been created/mounted properly
|
||||
into the Velero server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Velero server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
|
||||
* The `credentials-velero` file is formatted properly and has the correct values:
|
||||
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id=<your AWS access key ID>
|
||||
aws_secret_access_key=<your AWS secret access key>
|
||||
```
|
||||
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
|
||||
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials`
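
A quick way to verify the first three items is to decode the secret's `cloud` key and inspect the result (a sketch; assumes Velero runs in the `velero` namespace):

```
kubectl -n velero get secret cloud-credentials \
  -o jsonpath='{.data.cloud}' | base64 --decode
```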
|
||||
|
||||
#### Using kube2iam
|
||||
This means that Ark can't read the content of the S3 bucket. Ensure the following:
|
||||
|
@ -45,22 +45,22 @@ This means that Ark can't read the content of the S3 bucket. Ensure the followin
|
|||
## Azure
|
||||
|
||||
### `Failed to refresh the Token` or `adal: Refresh request failed`
|
||||
This means that the secrets containing the Azure service principal credentials for Ark has not been created/mounted
|
||||
properly into the Ark server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Ark server's namespace
|
||||
This means that the secrets containing the Azure service principal credentials for Velero have not been created/mounted
|
||||
properly into the Velero server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Velero server's namespace
|
||||
* The `cloud-credentials` secret has all of the expected keys and each one has the correct value (see [setup instructions][0])
|
||||
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
|
||||
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials`
|
||||
|
||||
|
||||
## GCE/GKE
|
||||
|
||||
### `open credentials/cloud: no such file or directory`
|
||||
This means that the secret containing the GCE service account credentials for Ark has not been created/mounted properly
|
||||
into the Ark server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Ark server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-ark` file
|
||||
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
|
||||
This means that the secret containing the GCE service account credentials for Velero has not been created/mounted properly
|
||||
into the Velero server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Velero server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file
|
||||
* The `cloud-credentials` secret is defined as a volume for the Velero deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials`
|
||||
|
||||
[0]: azure-config.md#credentials-and-configuration
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
|
||||
## Example
|
||||
|
||||
When Heptio Ark finishes a Restore, its status changes to "Completed" regardless of whether or not there are issues during the process. The number of warnings and errors are indicated in the output columns from `ark restore get`:
|
||||
When Velero finishes a Restore, its status changes to "Completed" regardless of whether or not there are issues during the process. The number of warnings and errors is indicated in the output columns from `velero restore get`:
|
||||
|
||||
```
|
||||
NAME BACKUP STATUS WARNINGS ERRORS CREATED SELECTOR
|
||||
|
@ -15,14 +15,14 @@ backup-test-2-20170726180514 backup-test-2 Completed 0 0 2
|
|||
backup-test-2-20170726180515 backup-test-2 Completed 0 1 2017-07-26 13:32:59 -0400 EDT <none>
|
||||
```
|
||||
|
||||
To delve into the warnings and errors into more detail, you can use `ark restore describe`:
|
||||
To delve into the warnings and errors in more detail, you can use `velero restore describe`:
|
||||
```
|
||||
ark restore describe backup-test-20170726180512
|
||||
velero restore describe backup-test-20170726180512
|
||||
```
|
||||
The output looks like this:
|
||||
```
|
||||
Name: backup-test-20170726180512
|
||||
Namespace: heptio-ark
|
||||
Namespace: velero
|
||||
Labels: <none>
|
||||
Annotations: <none>
|
||||
|
||||
|
@ -48,10 +48,10 @@ Phase: Completed
|
|||
Validation errors: <none>
|
||||
|
||||
Warnings:
|
||||
Ark: <none>
|
||||
Velero: <none>
|
||||
Cluster: <none>
|
||||
Namespaces:
|
||||
heptio-ark: serviceaccounts "ark" already exists
|
||||
velero: serviceaccounts "velero" already exists
|
||||
serviceaccounts "default" already exists
|
||||
kube-public: serviceaccounts "default" already exists
|
||||
kube-system: serviceaccounts "attachdetach-controller" already exists
|
||||
|
@ -80,7 +80,7 @@ Warnings:
|
|||
default: serviceaccounts "default" already exists
|
||||
|
||||
Errors:
|
||||
Ark: <none>
|
||||
Velero: <none>
|
||||
Cluster: <none>
|
||||
Namespaces: <none>
|
||||
```
|
||||
|
@ -93,7 +93,7 @@ of them may have been pre-existing).
|
|||
|
||||
Both errors and warnings are structured in the same way:
|
||||
|
||||
* `Ark`: A list of system-related issues encountered by the Ark server (e.g. couldn't read directory).
|
||||
* `Velero`: A list of system-related issues encountered by the Velero server (e.g. couldn't read directory).
|
||||
|
||||
* `Cluster`: A list of issues related to the restore of cluster-scoped resources.
|
||||
|
||||
|
|
|
@ -2,22 +2,22 @@
|
|||
|
||||
*Using Schedules and Restore-Only Mode*
|
||||
|
||||
If you periodically back up your cluster's resources, you are able to return to a previous state in case of some unexpected mishap, such as a service outage. Doing so with Heptio Ark looks like the following:
|
||||
If you periodically back up your cluster's resources, you can return to a previous state in case of some unexpected mishap, such as a service outage. Doing so with Velero looks like the following:
|
||||
|
||||
1. After you first run the Ark server on your cluster, set up a daily backup (replacing `<SCHEDULE NAME>` in the command as desired):
|
||||
1. After you first run the Velero server on your cluster, set up a daily backup (replacing `<SCHEDULE NAME>` in the command as desired):
|
||||
|
||||
```
|
||||
ark schedule create <SCHEDULE NAME> --schedule "0 7 * * *"
|
||||
velero schedule create <SCHEDULE NAME> --schedule "0 7 * * *"
|
||||
```
|
||||
This creates a Backup object with the name `<SCHEDULE NAME>-<TIMESTAMP>`.
|
||||
|
||||
1. A disaster happens and you need to recreate your resources.
|
||||
|
||||
1. Update the Ark server deployment, adding the argument for the `server` command flag `restore-only` set to `true`. This prevents Backup objects from being created or deleted during your Restore process.
|
||||
1. Update the Velero server deployment, adding the `--restore-only` flag to the `server` command's arguments. This prevents Backup objects from being created or deleted during your Restore process.
|
||||
|
||||
1. Create a restore with your most recent Ark Backup:
|
||||
1. Create a restore with your most recent Velero Backup:
|
||||
```
|
||||
ark restore create --from-backup <SCHEDULE NAME>-<TIMESTAMP>
|
||||
velero restore create --from-backup <SCHEDULE NAME>-<TIMESTAMP>
|
||||
```
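
To confirm the recovery, check the restore's status (a sketch; restore names default to the backup name plus a timestamp):

```
velero restore get
velero restore describe <RESTORE NAME>
```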
|
||||
|
||||
|
||||
|
|
|
@ -1,17 +1,17 @@
|
|||
# Expose Minio outside your cluster
|
||||
|
||||
When you run commands to get logs or describe a backup, the Ark server generates a pre-signed URL to download the requested items. To access these URLs from outside the cluster -- that is, from your Ark client -- you need to make Minio available outside the cluster. You can:
|
||||
When you run commands to get logs or describe a backup, the Velero server generates a pre-signed URL to download the requested items. To access these URLs from outside the cluster -- that is, from your Velero client -- you need to make Minio available outside the cluster. You can:
|
||||
|
||||
- Change the Minio Service type from `ClusterIP` to `NodePort`.
|
||||
- Set up Ingress for your cluster, keeping Minio Service type `ClusterIP`.
|
||||
|
||||
In Ark 0.10, you can also specify the value of a new `publicUrl` field for the pre-signed URL in your backup storage config.
|
||||
In Velero 0.10, you can also specify the value of a new `publicUrl` field for the pre-signed URL in your backup storage config.
|
||||
|
||||
For basic instructions on how to install the Ark server and client, see [the getting started example][1].
|
||||
For basic instructions on how to install the Velero server and client, see [the getting started example][1].
|
||||
|
||||
## Expose Minio with Service of type NodePort
|
||||
|
||||
The Minio deployment by default specifies a Service of type `ClusterIP`. You can change this to `NodePort` to easily expose a cluster service externally if you can reach the node from your Ark client.
|
||||
The Minio deployment by default specifies a Service of type `ClusterIP`. You can change this to `NodePort` to easily expose a cluster service externally if you can reach the node from your Velero client.
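
One way to make that change without editing files is a patch (a sketch; assumes the example's `minio` Service in the `velero` namespace):

```shell
kubectl -n velero patch svc minio -p '{"spec": {"type": "NodePort"}}'
```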
|
||||
|
||||
You must also get the Minio URL, which you can then specify as the value of the new `publicUrl` field in your backup storage config.
|
||||
|
||||
|
@ -22,29 +22,29 @@ You must also get the Minio URL, which you can then specify as the value of the
|
|||
- if you're running Minikube:
|
||||
|
||||
```shell
|
||||
minikube service minio --namespace=heptio-ark --url
|
||||
minikube service minio --namespace=velero --url
|
||||
```
|
||||
|
||||
- in any other environment:
|
||||
|
||||
1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Ark client.
|
||||
1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Velero client.
|
||||
|
||||
1. Append the value of the NodePort to get a complete URL. You can get this value by running:
|
||||
|
||||
```shell
|
||||
kubectl -n heptio-ark get svc/minio -o jsonpath='{.spec.ports[0].nodePort}'
|
||||
kubectl -n velero get svc/minio -o jsonpath='{.spec.ports[0].nodePort}'
|
||||
```
|
||||
|
||||
1. In `examples/minio/05-ark-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide this Minio URL as the value of the `publicUrl` field. You must include the `http://` or `https://` prefix.
|
||||
1. In `examples/minio/05-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide this Minio URL as the value of the `publicUrl` field. You must include the `http://` or `https://` prefix.
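
Putting these steps together, a sketch that assembles the URL (assumes your first node advertises a reachable `ExternalIP`):

```shell
NODE_IP=$(kubectl get nodes \
  -o jsonpath='{.items[0].status.addresses[?(@.type=="ExternalIP")].address}')
NODE_PORT=$(kubectl -n velero get svc/minio -o jsonpath='{.spec.ports[0].nodePort}')
# use this value, including the scheme, as publicUrl
echo "http://${NODE_IP}:${NODE_PORT}"
```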
|
||||
|
||||
## Work with Ingress
|
||||
|
||||
Configuring Ingress for your cluster is out of scope for the Ark documentation. If you have already set up Ingress, however, it makes sense to continue with it while you run the example Ark configuration with Minio.
|
||||
Configuring Ingress for your cluster is out of scope for the Velero documentation. If you have already set up Ingress, however, it makes sense to continue with it while you run the example Velero configuration with Minio.
|
||||
|
||||
In this case:
|
||||
|
||||
1. Keep the Service type as `ClusterIP`.
|
||||
|
||||
1. In `examples/minio/05-ark-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide the URL and port of your Ingress as the value of the `publicUrl` field.
|
||||
1. In `examples/minio/05-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide the URL and port of your Ingress as the value of the `publicUrl` field.
|
||||
|
||||
[1]: get-started.md
|
||||
[1]: get-started.md
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
# Extend Ark
|
||||
# Extend Velero
|
||||
|
||||
Ark includes mechanisms for extending the core functionality to meet your individual backup/restore needs:
|
||||
Velero includes mechanisms for extending the core functionality to meet your individual backup/restore needs:
|
||||
|
||||
* [Hooks][27] allow you to specify commands to be executed within running pods during a backup. This is useful if you need to run a workload-specific command prior to taking a backup (for example, to flush disk buffers or to freeze a database).
|
||||
* [Plugins][28] allow you to develop custom object/block storage back-ends or per-item backup/restore actions that can execute arbitrary logic, including modifying the items being backed up/restored. Plugins can be used by Ark without needing to be compiled into the core Ark binary.
|
||||
* [Plugins][28] allow you to develop custom object/block storage back-ends or per-item backup/restore actions that can execute arbitrary logic, including modifying the items being backed up/restored. Plugins can be used by Velero without needing to be compiled into the core Velero binary.
|
||||
|
||||
[27]: hooks.md
|
||||
[28]: plugins.md
|
||||
|
|
docs/faq.md
|
@ -1,15 +1,15 @@
|
|||
# FAQ
|
||||
|
||||
## When is it appropriate to use Ark instead of etcd's built in backup/restore?
|
||||
## When is it appropriate to use Velero instead of etcd's built-in backup/restore?
|
||||
|
||||
Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
|
||||
example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
|
||||
sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
|
||||
sophisticated management of your Kubernetes cluster backups and restores, we feel that Velero is
|
||||
generally a better approach. It gives you the ability to throw away an unstable cluster and restore
|
||||
your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up
|
||||
and restoring etcd.
|
||||
|
||||
Examples of cases where Ark is useful:
|
||||
Examples of cases where Velero is useful:
|
||||
|
||||
* you don't have access to etcd (e.g. you're running on GKE)
|
||||
* backing up both Kubernetes resources and persistent volume state
|
||||
|
@ -18,20 +18,20 @@ Examples of cases where Ark is useful:
|
|||
* backing up Kubernetes resources that are stored across multiple etcd clusters (for example if you
|
||||
run a custom apiserver)
|
||||
|
||||
## Will Ark restore my Kubernetes resources exactly the way they were before?
|
||||
## Will Velero restore my Kubernetes resources exactly the way they were before?
|
||||
|
||||
Yes, with some exceptions. For example, when Ark restores pods it deletes the `nodeName` from the
|
||||
Yes, with some exceptions. For example, when Velero restores pods it deletes the `nodeName` from the
|
||||
pod so that it can be scheduled onto a new node. You can see some more examples of the differences
|
||||
in [pod_action.go](https://github.com/heptio/ark/blob/master/pkg/restore/pod_action.go)
|
||||
in [pod_action.go](https://github.com/heptio/velero/blob/master/pkg/restore/pod_action.go)
|
||||
|
||||
## I'm using Ark in multiple clusters. Should I use the same bucket to store all of my backups?
|
||||
## I'm using Velero in multiple clusters. Should I use the same bucket to store all of my backups?
|
||||
|
||||
We **strongly** recommend that you use a separate bucket per cluster to store backups. Sharing a bucket
|
||||
across multiple Ark instances can lead to numerous problems - failed backups, overwritten backups,
|
||||
inadvertently deleted backups, etc., all of which can be avoided by using a separate bucket per Ark
|
||||
across multiple Velero instances can lead to numerous problems - failed backups, overwritten backups,
|
||||
inadvertently deleted backups, etc., all of which can be avoided by using a separate bucket per Velero
|
||||
instance.
|
||||
|
||||
Related to this, if you need to restore a backup from cluster A into cluster B, please use restore-only
|
||||
mode in cluster B's Ark instance (via the `--restore-only` flag on the `ark server` command specified
|
||||
in your Ark deployment) while it's configured to use cluster A's bucket. This will ensure no
|
||||
mode in cluster B's Velero instance (via the `--restore-only` flag on the `velero server` command specified
|
||||
in your Velero deployment) while it's configured to use cluster A's bucket. This will ensure no
|
||||
new backups are created, and no existing backups are deleted or overwritten.
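
A sketch of toggling that flag on a running deployment with a JSON patch (assumes the server container is the first container and already passes an `args` list):

```
kubectl -n velero patch deployment velero --type json \
  -p '[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--restore-only"}]'
```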
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# Run Ark on GCP
|
||||
# Run Velero on GCP
|
||||
|
||||
You can run Kubernetes on Google Cloud Platform in either:
|
||||
|
||||
|
@ -9,7 +9,7 @@ If you do not have the `gcloud` and `gsutil` CLIs locally installed, follow the
|
|||
|
||||
## Create GCS bucket
|
||||
|
||||
Heptio Ark requires an object storage bucket in which to store backups, preferably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create a GCS bucket, replacing the <YOUR_BUCKET> placeholder with the name of your bucket:
|
||||
Velero requires an object storage bucket in which to store backups, preferably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create a GCS bucket, replacing the `<YOUR_BUCKET>` placeholder with the name of your bucket:
|
||||
|
||||
```bash
|
||||
BUCKET=<YOUR_BUCKET>
|
||||
|
@ -19,7 +19,7 @@ gsutil mb gs://$BUCKET/
|
|||
|
||||
## Create service account
|
||||
|
||||
To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]:
|
||||
To integrate Velero with GCP, create a Velero-specific [Service Account][15]:
|
||||
|
||||
1. View your current config settings:
|
||||
|
||||
|
@ -36,13 +36,13 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]:
|
|||
2. Create a service account:
|
||||
|
||||
```bash
|
||||
gcloud iam service-accounts create heptio-ark \
|
||||
--display-name "Heptio Ark service account"
|
||||
gcloud iam service-accounts create velero \
|
||||
--display-name "Velero service account"
|
||||
```
|
||||
|
||||
> If you'll be using Ark to backup multiple clusters with multiple GCS buckets, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`.
|
||||
> If you'll be using Velero to back up multiple clusters with multiple GCS buckets, it may be desirable to create a unique username per cluster rather than the default `velero`.
|
||||
|
||||
Then list all accounts and find the `heptio-ark` account you just created:
|
||||
Then list all accounts and find the `velero` account you just created:
|
||||
```bash
|
||||
gcloud iam service-accounts list
|
||||
```
|
||||
|
@ -51,11 +51,11 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]:
|
|||
|
||||
```bash
|
||||
SERVICE_ACCOUNT_EMAIL=$(gcloud iam service-accounts list \
|
||||
--filter="displayName:Heptio Ark service account" \
|
||||
--filter="displayName:Velero service account" \
|
||||
--format 'value(email)')
|
||||
```
|
||||
|
||||
3. Attach policies to give `heptio-ark` the necessary permissions to function:
|
||||
3. Attach policies to give `velero` the necessary permissions to function:
|
||||
|
||||
```bash
|
||||
|
||||
|
@ -69,22 +69,22 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]:
|
|||
compute.snapshots.delete
|
||||
)
|
||||
|
||||
gcloud iam roles create heptio_ark.server \
|
||||
gcloud iam roles create velero.server \
|
||||
--project $PROJECT_ID \
|
||||
--title "Heptio Ark Server" \
|
||||
--title "Velero Server" \
|
||||
--permissions "$(IFS=","; echo "${ROLE_PERMISSIONS[*]}")"
|
||||
|
||||
gcloud projects add-iam-policy-binding $PROJECT_ID \
|
||||
--member serviceAccount:$SERVICE_ACCOUNT_EMAIL \
|
||||
--role projects/$PROJECT_ID/roles/heptio_ark.server
|
||||
--role projects/$PROJECT_ID/roles/velero.server
|
||||
|
||||
gsutil iam ch serviceAccount:$SERVICE_ACCOUNT_EMAIL:objectAdmin gs://${BUCKET}
|
||||
```
|
||||
|
||||
4. Create a service account key, specifying an output file (`credentials-ark`) in your local directory:
|
||||
4. Create a service account key, specifying an output file (`credentials-velero`) in your local directory:
|
||||
|
||||
```bash
|
||||
gcloud iam service-accounts keys create credentials-ark \
|
||||
gcloud iam service-accounts keys create credentials-velero \
|
||||
--iam-account $SERVICE_ACCOUNT_EMAIL
|
||||
```
|
||||
|
||||
|
@ -93,7 +93,7 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]:
|
|||
If you run Google Kubernetes Engine (GKE), make sure that your current IAM user is a cluster-admin. This role is required to create RBAC objects.
|
||||
See [the GKE documentation][22] for more information.
|
||||
|
||||
In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].
|
||||
In the Velero directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
|
@ -103,15 +103,15 @@ Create a Secret. In the directory of the credentials file you just created, run:
|
|||
|
||||
```bash
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace heptio-ark \
|
||||
--from-file cloud=credentials-ark
|
||||
--namespace velero \
|
||||
--from-file cloud=credentials-velero
|
||||
```
|
||||
|
||||
**Note: If you use a custom namespace, replace `heptio-ark` with the name of the custom namespace**
|
||||
**Note: If you use a custom namespace, replace `velero` with the name of the custom namespace**
|
||||
|
||||
Specify the following values in the example files:
|
||||
|
||||
* In file `config/gcp/05-ark-backupstoragelocation.yaml`:
|
||||
* In file `config/gcp/05-backupstoragelocation.yaml`:
|
||||
|
||||
* Replace `<YOUR_BUCKET>`. See the [BackupStorageLocation definition][7] for details.
|
||||
|
||||
|
@ -125,11 +125,11 @@ Specify the following values in the example files:
|
|||
|
||||
## Start the server
|
||||
|
||||
In the root of your Ark directory, run:
|
||||
In the root of your Velero directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/gcp/05-ark-backupstoragelocation.yaml
|
||||
kubectl apply -f config/gcp/06-ark-volumesnapshotlocation.yaml
|
||||
kubectl apply -f config/gcp/05-backupstoragelocation.yaml
|
||||
kubectl apply -f config/gcp/06-volumesnapshotlocation.yaml
|
||||
kubectl apply -f config/gcp/10-deployment.yaml
|
||||
```
|
||||
|
||||
|
|
|
@ -1,19 +1,19 @@
|
|||
## Getting started
|
||||
|
||||
The following example sets up the Ark server and client, then backs up and restores a sample application.
|
||||
The following example sets up the Velero server and client, then backs up and restores a sample application.
|
||||
|
||||
For simplicity, the example uses Minio, an S3-compatible storage service that runs locally on your cluster.
|
||||
For additional functionality with this setup, see the docs on how to [expose Minio outside your cluster][31].
|
||||
|
||||
**NOTE** The example lets you explore basic Ark functionality. Configuring Minio for production is out of scope.
|
||||
**NOTE** The example lets you explore basic Velero functionality. Configuring Minio for production is out of scope.
|
||||
|
||||
See [Set up Ark on your platform][3] for how to configure Ark for a production environment.
|
||||
See [Set up Velero on your platform][3] for how to configure Velero for a production environment.
|
||||
|
||||
If you encounter issues with installing or configuring, see [Debugging Installation Issues](debugging-install.md).
|
||||
|
||||
### Prerequisites
|
||||
|
||||
* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `ark backup delete`.
|
||||
* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `velero backup delete`.
|
||||
* A DNS server on the cluster
|
||||
* `kubectl` installed
|
||||
|
||||
|
@ -25,22 +25,22 @@ If you encounter issues with installing or configuring, see [Debugging Installat
|
|||
```bash
|
||||
tar -xzf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
|
||||
```
|
||||
We'll refer to the directory you extracted to as the "Ark directory" in subsequent steps.
|
||||
We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps.
|
||||
|
||||
1. Move the `ark` binary from the Ark directory to somewhere in your PATH.
|
||||
1. Move the `velero` binary from the Velero directory to somewhere in your PATH.
|
||||
|
||||
#### MacOS Installation
|
||||
|
||||
On Mac, you can use [HomeBrew](https://brew.sh) to install the `ark` client:
|
||||
On Mac, you can use [Homebrew](https://brew.sh) to install the `velero` client:
|
||||
```bash
|
||||
brew install ark
|
||||
brew install velero
|
||||
```
|
||||
|
||||
### Set up server
|
||||
|
||||
These instructions start the Ark server and a Minio instance that is accessible from within the cluster only. See [Expose Minio outside your cluster][31] for information about configuring your cluster for outside access to Minio. Outside access is required to access logs and run `ark describe` commands.
|
||||
These instructions start the Velero server and a Minio instance that is accessible from within the cluster only. See [Expose Minio outside your cluster][31] for information about configuring your cluster for outside access to Minio. Outside access is required to access logs and run `velero describe` commands.
|
||||
|
||||
1. Start the server and the local storage service. In the Ark directory, run:
|
||||
1. Start the server and the local storage service. In the Velero directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
|
@ -53,10 +53,10 @@ These instructions start the Ark server and a Minio instance that is accessible
|
|||
kubectl apply -f config/nginx-app/base.yaml
|
||||
```
|
||||
|
||||
1. Check to see that both the Ark and nginx deployments are successfully created:
|
||||
1. Check to see that both the Velero and nginx deployments are successfully created:
|
||||
|
||||
```
|
||||
kubectl get deployments -l component=ark --namespace=heptio-ark
|
||||
kubectl get deployments -l component=velero --namespace=velero
|
||||
kubectl get deployments --namespace=nginx-example
|
||||
```
|
||||
|
||||
|
@ -65,25 +65,25 @@ These instructions start the Ark server and a Minio instance that is accessible
|
|||
1. Create a backup for any object that matches the `app=nginx` label selector:
|
||||
|
||||
```
|
||||
ark backup create nginx-backup --selector app=nginx
|
||||
velero backup create nginx-backup --selector app=nginx
|
||||
```
|
||||
|
||||
Alternatively, if you want to back up all objects *except* those matching the label `backup=ignore`:
|
||||
|
||||
```
|
||||
ark backup create nginx-backup --selector 'backup notin (ignore)'
|
||||
velero backup create nginx-backup --selector 'backup notin (ignore)'
|
||||
```
|
||||
|
||||
1. (Optional) Create regularly scheduled backups based on a cron expression using the `app=nginx` label selector:
|
||||
|
||||
```
|
||||
ark schedule create nginx-daily --schedule="0 1 * * *" --selector app=nginx
|
||||
velero schedule create nginx-daily --schedule="0 1 * * *" --selector app=nginx
|
||||
```
|
||||
|
||||
Alternatively, you can use some non-standard shorthand cron expressions:
|
||||
|
||||
```
|
||||
ark schedule create nginx-daily --schedule="@daily" --selector app=nginx
|
||||
velero schedule create nginx-daily --schedule="@daily" --selector app=nginx
|
||||
```
|
||||
|
||||
See the [cron package's documentation][30] for more usage examples.
|
||||
|
@ -111,13 +111,13 @@ These instructions start the Ark server and a Minio instance that is accessible
|
|||
1. Run:
|
||||
|
||||
```
|
||||
ark restore create --from-backup nginx-backup
|
||||
velero restore create --from-backup nginx-backup
|
||||
```
|
||||
|
||||
1. Run:
|
||||
|
||||
```
|
||||
ark restore get
|
||||
velero restore get
|
||||
```
|
||||
|
||||
After the restore finishes, the output looks like the following:
|
||||
|
@ -134,7 +134,7 @@ After a successful restore, the `STATUS` column is `Completed`, and `WARNINGS` a
|
|||
If there are errors or warnings, you can look at them in detail:
|
||||
|
||||
```
|
||||
ark restore describe <RESTORE_NAME>
|
||||
velero restore describe <RESTORE_NAME>
|
||||
```
|
||||
|
||||
For more information, see [the debugging information][18].
|
||||
|
@ -145,21 +145,21 @@ If you want to delete any backups you created, including data in object storage
|
|||
volume snapshots, you can run:
|
||||
|
||||
```
|
||||
ark backup delete BACKUP_NAME
|
||||
velero backup delete BACKUP_NAME
|
||||
```
|
||||
|
||||
This asks the Ark server to delete all backup data associated with `BACKUP_NAME`. You need to do
|
||||
this for each backup you want to permanently delete. A future version of Ark will allow you to
|
||||
This asks the Velero server to delete all backup data associated with `BACKUP_NAME`. You need to do
|
||||
this for each backup you want to permanently delete. A future version of Velero will allow you to
|
||||
delete multiple backups by name or label selector.
|
||||
|
||||
Once fully removed, the backup is no longer visible when you run:
|
||||
|
||||
```
|
||||
ark backup get BACKUP_NAME
|
||||
velero backup get BACKUP_NAME
|
||||
```
|
||||
|
||||
If you want to uninstall Ark but preserve the backup data in object storage and persistent volume
|
||||
snapshots, it is safe to remove the `heptio-ark` namespace and everything else created for this
|
||||
If you want to uninstall Velero but preserve the backup data in object storage and persistent volume
|
||||
snapshots, it is safe to remove the `velero` namespace and everything else created for this
|
||||
example:
|
||||
|
||||
```
|
||||
|
@ -171,5 +171,5 @@ kubectl delete -f config/nginx-app/base.yaml
|
|||
[31]: expose-minio.md
|
||||
[3]: install-overview.md
|
||||
[18]: debugging-restores.md
|
||||
[26]: https://github.com/heptio/ark/releases
|
||||
[26]: https://github.com/heptio/velero/releases
|
||||
[30]: https://godoc.org/github.com/robfig/cron
|
||||
|
|
|
@ -1,16 +1,16 @@
|
|||
# Hooks
|
||||
|
||||
Heptio Ark currently supports executing commands in containers in pods during a backup.
|
||||
Velero currently supports executing commands in containers in pods during a backup.
|
||||
|
||||
## Backup Hooks
|
||||
|
||||
When performing a backup, you can specify one or more commands to execute in a container in a pod
|
||||
when that pod is being backed up.
|
||||
|
||||
Ark versions prior to v0.7.0 only support hooks that execute prior to any custom action processing
|
||||
Velero versions prior to v0.7.0 only support hooks that execute prior to any custom action processing
|
||||
("pre" hooks).
|
||||
|
||||
As of version v0.7.0, Ark also supports "post" hooks - these execute after all custom actions have
|
||||
As of version v0.7.0, Velero also supports "post" hooks - these execute after all custom actions have
|
||||
completed, as well as after all the additional items specified by custom actions have been backed
|
||||
up.
|
||||
|
||||
|
@ -18,28 +18,26 @@ There are two ways to specify hooks: annotations on the pod itself, and in the B
|
|||
|
||||
### Specifying Hooks As Pod Annotations
|
||||
|
||||
You can use the following annotations on a pod to make Ark execute a hook when backing up the pod:
|
||||
You can use the following annotations on a pod to make Velero execute a hook when backing up the pod:
|
||||
|
||||
#### Pre hooks
|
||||
|
||||
| Annotation Name | Description |
|
||||
| --- | --- |
|
||||
| `pre.hook.backup.ark.heptio.com/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
|
||||
| `pre.hook.backup.ark.heptio.com/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
|
||||
| `pre.hook.backup.ark.heptio.com/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
|
||||
| `pre.hook.backup.ark.heptio.com/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
|
||||
| `pre.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
|
||||
| `pre.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
|
||||
| `pre.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
|
||||
| `pre.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
|
||||
|
||||
Ark v0.7.0+ continues to support the original (deprecated) way to specify pre hooks - without the
|
||||
`pre.` prefix in the annotation names (e.g. `hook.backup.ark.heptio.com/container`).
|
||||
|
||||
#### Post hooks (v0.7.0+)
|
||||
|
||||
| Annotation Name | Description |
|
||||
| --- | --- |
|
||||
| `post.hook.backup.ark.heptio.com/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
|
||||
| `post.hook.backup.ark.heptio.com/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
|
||||
| `post.hook.backup.ark.heptio.com/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
|
||||
| `post.hook.backup.ark.heptio.com/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
|
||||
| `post.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
|
||||
| `post.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
|
||||
| `post.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
|
||||
| `post.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
|
||||
|
||||
### Specifying Hooks in the Backup Spec
|
||||
|
||||
|
@ -56,25 +54,25 @@ setup this example.
|
|||
|
||||
### Annotations
|
||||
|
||||
The Ark [example/nginx-app/with-pv.yaml][2] serves as an example of adding the pre and post hook annotations directly
|
||||
The Velero [example/nginx-app/with-pv.yaml][2] serves as an example of adding the pre and post hook annotations directly
|
||||
to your declarative deployment. Below is an example of what updating an object in place might look like.
|
||||
|
||||
```shell
|
||||
kubectl annotate pod -n nginx-example -l app=nginx \
|
||||
pre.hook.backup.ark.heptio.com/command='["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]' \
|
||||
pre.hook.backup.ark.heptio.com/container=fsfreeze \
|
||||
post.hook.backup.ark.heptio.com/command='["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]' \
|
||||
post.hook.backup.ark.heptio.com/container=fsfreeze
|
||||
pre.hook.backup.velero.io/command='["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]' \
|
||||
pre.hook.backup.velero.io/container=fsfreeze \
|
||||
post.hook.backup.velero.io/command='["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]' \
|
||||
post.hook.backup.velero.io/container=fsfreeze
|
||||
```
|
||||
|
||||
Now test the pre and post hooks by creating a backup. You can use the Ark logs to verify that the pre and post
|
||||
Now test the pre and post hooks by creating a backup. You can use the Velero logs to verify that the pre and post
|
||||
hooks are running and exiting without error.
|
||||
|
||||
```shell
|
||||
ark backup create nginx-hook-test
|
||||
velero backup create nginx-hook-test
|
||||
|
||||
ark backup get nginx-hook-test
|
||||
ark backup logs nginx-hook-test | grep hookCommand
|
||||
velero backup get nginx-hook-test
|
||||
velero backup logs nginx-hook-test | grep hookCommand
|
||||
```
|
||||
|
||||
|
||||
|
|
|
@ -1,31 +1,31 @@
|
|||
# Use IBM Cloud Object Storage as Ark's storage destination.
|
||||
You can deploy Ark on IBM [Public][5] or [Private][4] clouds, or even on any other Kubernetes cluster, but anyway you can use IBM Cloud Object Store as a destination for Ark's backups.
|
||||
# Use IBM Cloud Object Storage as Velero's storage destination
|
||||
You can deploy Velero on IBM [Public][5] or [Private][4] clouds, or on any other Kubernetes cluster, and still use IBM Cloud Object Storage as the destination for Velero's backups.
|
||||
|
||||
To set up IBM Cloud Object Storage (COS) as Ark's destination, you:
|
||||
To set up IBM Cloud Object Storage (COS) as Velero's destination, you:
|
||||
|
||||
* Create your COS instance
|
||||
* Create an S3 bucket
|
||||
* Define a service that can store data in the bucket
|
||||
* Configure and start the Ark server
|
||||
* Configure and start the Velero server
|
||||
|
||||
|
||||
## Create COS instance
|
||||
If you don’t have a COS instance, you can create a new one, according to the detailed instructions in [Creating a new resource instance][1].
|
||||
|
||||
## Create an S3 bucket
|
||||
Heptio Ark requires an object storage bucket to store backups in. See instructions in [Create some buckets to store your data][2].
|
||||
Velero requires an object storage bucket to store backups in. See instructions in [Create some buckets to store your data][2].
|
||||
|
||||
## Define a service that can store data in the bucket.
|
||||
The process of creating service credentials is described in [Service credentials][3].
|
||||
Several comments:
|
||||
|
||||
1. The Ark service will write its backup into the bucket, so it requires the “Writer” access role.
|
||||
1. The Velero service will write its backup into the bucket, so it requires the “Writer” access role.
|
||||
|
||||
2. Ark uses an AWS S3 compatible API. Which means it authenticates using a signature created from a pair of access and secret keys — a set of HMAC credentials. You can create these HMAC credentials by specifying `{“HMAC”:true}` as an optional inline parameter. See step 3 in the [Service credentials][3] guide.
|
||||
2. Velero uses an AWS S3-compatible API, which means it authenticates using a signature created from a pair of access and secret keys — a set of HMAC credentials. You can create these HMAC credentials by specifying `{"HMAC":true}` as an optional inline parameter. See step 3 in the [Service credentials][3] guide.
|
||||
|
||||
3. After successfully creating a Service credential, you can view the JSON definition of the credential. Under the `cos_hmac_keys` entry there are `access_key_id` and `secret_access_key`. We will use them in the next step.
|
||||
|
||||
4. Create an Ark-specific credentials file (`credentials-ark`) in your local directory:
|
||||
4. Create a Velero-specific credentials file (`credentials-velero`) in your local directory:
|
||||
|
||||
```
|
||||
[default]
|
||||
|
@ -37,7 +37,7 @@ Several comments:
|
|||
|
||||
## Credentials and configuration
|
||||
|
||||
In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].
|
||||
In the Velero directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
|
@ -47,13 +47,13 @@ Create a Secret. In the directory of the credentials file you just created, run:
|
|||
|
||||
```bash
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace <ARK_NAMESPACE> \
|
||||
--from-file cloud=credentials-ark
|
||||
--namespace <VELERO_NAMESPACE> \
|
||||
--from-file cloud=credentials-velero
|
||||
```
|
||||
|
||||
Specify the following values in the example files:
|
||||
|
||||
* In `config/ibm/05-ark-backupstoragelocation.yaml`:
|
||||
* In `config/ibm/05-backupstoragelocation.yaml`:
|
||||
|
||||
* Replace `<YOUR_BUCKET>`, `<YOUR_REGION>` and `<YOUR_URL_ACCESS_POINT>`. See the [BackupStorageLocation definition][6] for details.
|
||||
|
||||
|
@ -61,12 +61,12 @@ Specify the following values in the example files:
|
|||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with your `StorageClass` name.
|
||||
|
||||
## Start the Ark server
|
||||
## Start the Velero server
|
||||
|
||||
In the root of your Ark directory, run:
|
||||
In the root of your Velero directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/ibm/05-ark-backupstoragelocation.yaml
|
||||
kubectl apply -f config/ibm/05-backupstoragelocation.yaml
|
||||
kubectl apply -f config/ibm/10-deployment.yaml
|
||||
```
|
||||
|
||||
|
|
|
@ -1,21 +1,21 @@
|
|||
# Image tagging policy
|
||||
|
||||
This document describes Ark's image tagging policy.
|
||||
This document describes Velero's image tagging policy.
|
||||
|
||||
## Released versions
|
||||
|
||||
`gcr.io/heptio-images/ark:<SemVer>`
|
||||
`gcr.io/heptio-images/velero:<SemVer>`
|
||||
|
||||
Ark follows the [Semantic Versioning](http://semver.org/) standard for releases. Each tag in the `github.com/heptio/ark` repository has a matching image, e.g. `gcr.io/heptio-images/ark:v0.8.0`.
|
||||
Velero follows the [Semantic Versioning](http://semver.org/) standard for releases. Each tag in the `github.com/heptio/velero` repository has a matching image, e.g. `gcr.io/heptio-images/velero:v0.11.0`.
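
For example, to pull the image matching a specific release tag:

```
docker pull gcr.io/heptio-images/velero:v0.11.0
```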
|
||||
|
||||
### Latest
|
||||
|
||||
`gcr.io/heptio-images/ark:latest`
|
||||
`gcr.io/heptio-images/velero:latest`
|
||||
|
||||
The `latest` tag follows the most recently released version of Ark.
|
||||
The `latest` tag follows the most recently released version of Velero.
|
||||
|
||||
## Development
|
||||
|
||||
`gcr.io/heptio-images/ark:master`
|
||||
`gcr.io/heptio-images/velero:master`
|
||||
|
||||
The `master` tag follows the latest commit to land on the `master` branch.
|
||||
The `master` tag follows the latest commit to land on the `master` branch.
|
||||
|
|
|
@ -1,42 +1,42 @@
|
|||
# Set up Ark on your platform
|
||||
# Set up Velero on your platform
|
||||
|
||||
You can run Ark with a cloud provider or on-premises. For detailed information about the platforms that Ark supports, see [Compatible Storage Providers][99].
|
||||
You can run Velero with a cloud provider or on-premises. For detailed information about the platforms that Velero supports, see [Compatible Storage Providers][99].
|
||||
|
||||
In version 0.7.0 and later, you can run Ark in any namespace, which requires additional customization. See [Run in custom namespace][3].
|
||||
In version 0.7.0 and later, you can run Velero in any namespace, which requires additional customization. See [Run in custom namespace][3].
|
||||
|
||||
In version 0.9.0 and later, you can use Ark's integration with restic, which requires additional setup. See [restic instructions][20].
|
||||
In version 0.9.0 and later, you can use Velero's integration with restic, which requires additional setup. See [restic instructions][20].
|
||||
|
||||
## Customize configuration
|
||||
|
||||
Whether you run Ark on a cloud provider or on-premises, if you have more than one volume snapshot location for a given volume provider, you can specify its default location for backups by setting a server flag in your Ark deployment YAML.
|
||||
Whether you run Velero on a cloud provider or on-premises, if you have more than one volume snapshot location for a given volume provider, you can specify its default location for backups by setting a server flag in your Velero deployment YAML.
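
As a sketch (flag name as of v0.10; confirm against `velero server --help` for your release), the flag takes `provider:location` pairs:

```bash
velero server --default-volume-snapshot-locations <PROVIDER>:<LOCATION NAME>
```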
|
||||
|
||||
For details, see the documentation topics for individual cloud providers.
|
||||
|
||||
## Cloud provider
|
||||
|
||||
The Ark repository includes a set of example YAML files that specify the settings for each supported cloud provider. For provider-specific instructions, see:
|
||||
The Velero repository includes a set of example YAML files that specify the settings for each supported cloud provider. For provider-specific instructions, see:
|
||||
|
||||
* [Run Ark on AWS][0]
|
||||
* [Run Ark on GCP][1]
|
||||
* [Run Ark on Azure][2]
|
||||
* [Use IBM Cloud Object Store as Ark's storage destination][4]
|
||||
* [Run Velero on AWS][0]
|
||||
* [Run Velero on GCP][1]
|
||||
* [Run Velero on Azure][2]
|
||||
* [Use IBM Cloud Object Store as Velero's storage destination][4]
|
||||
|
||||
## On-premises
|
||||
|
||||
You can run Ark in an on-premises cluster in different ways depending on your requirements.
|
||||
You can run Velero in an on-premises cluster in different ways depending on your requirements.
|
||||
|
||||
First, you must select an object storage backend that Ark can use to store backup data. [Compatible Storage Providers][99] contains information on various
|
||||
First, you must select an object storage backend that Velero can use to store backup data. [Compatible Storage Providers][99] contains information on various
|
||||
options that are supported or have been reported to work by users. [Minio][101] is an option if you want to keep your backup data on-premises and you are
|
||||
not using another storage platform that offers an S3-compatible object storage API.
|
||||
|
||||
Second, if you need to back up persistent volume data, you must select a volume backup solution. [Volume Snapshot Providers][100] contains information on
|
||||
the supported options. For example, if you use [Portworx][102] for persistent storage, you can install their Ark plugin to get native Portworx snapshots as part
|
||||
of your Ark backups. If there is no native snapshot plugin available for your storage platform, you can use Ark's [restic integration][20], which provides a
|
||||
the supported options. For example, if you use [Portworx][102] for persistent storage, you can install their Velero plugin to get native Portworx snapshots as part
|
||||
of your Velero backups. If there is no native snapshot plugin available for your storage platform, you can use Velero's [restic integration][20], which provides a
|
||||
platform-agnostic backup solution for volume data.
|
||||
|
||||
## Examples
|
||||
|
||||
After you set up the Ark server, try these examples:
|
||||
After you set up the Velero server, try these examples:
|
||||
|
||||
### Basic example (without PersistentVolumes)
|
||||
|
||||
|
@ -49,7 +49,7 @@ After you set up the Ark server, try these examples:
|
|||
1. Create a backup:
|
||||
|
||||
```bash
|
||||
ark backup create nginx-backup --include-namespaces nginx-example
|
||||
velero backup create nginx-backup --include-namespaces nginx-example
|
||||
```
|
||||
|
||||
1. Simulate a disaster:
|
||||
|
@ -63,7 +63,7 @@ After you set up the Ark server, try these examples:
|
|||
1. Restore your lost resources:
|
||||
|
||||
```bash
|
||||
ark restore create --from-backup nginx-backup
|
||||
velero restore create --from-backup nginx-backup
|
||||
```
|
||||
|
||||
### Snapshot example (with PersistentVolumes)
|
||||
|
@ -79,7 +79,7 @@ After you set up the Ark server, try these examples:
|
|||
1. Create a backup with PV snapshotting:
|
||||
|
||||
```bash
|
||||
ark backup create nginx-backup --include-namespaces nginx-example
|
||||
velero backup create nginx-backup --include-namespaces nginx-example
|
||||
```
|
||||
|
||||
1. Simulate a disaster:
|
||||
|
@ -93,7 +93,7 @@ After you set up the Ark server, try these examples:
|
|||
1. Restore your lost resources:
|
||||
|
||||
```bash
|
||||
ark restore create --from-backup nginx-backup
|
||||
velero restore create --from-backup nginx-backup
|
||||
```
|
||||
|
||||
[0]: aws-config.md
|
||||
|
|
|
@ -24,7 +24,7 @@ import (
|
|||
"os"
|
||||
"text/template"
|
||||
|
||||
"github.com/heptio/ark/pkg/cmd/cli/bug"
|
||||
"github.com/heptio/velero/pkg/cmd/cli/bug"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@ -38,7 +38,7 @@ func main() {
|
|||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = tmpl.Execute(outFile, bug.ArkBugInfo{})
|
||||
err = tmpl.Execute(outFile, bug.VeleroBugInfo{})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
|
|
@@ -1,55 +1,55 @@
 # Backup Storage Locations and Volume Snapshot Locations

-Ark v0.10 introduces a new way of configuring where Ark backups and their associated persistent volume snapshots are stored.
+Velero v0.10 introduces a new way of configuring where Velero backups and their associated persistent volume snapshots are stored.

 ## Motivations

-In Ark versions prior to v0.10, the configuration for where to store backups & volume snapshots is specified in a `Config` custom resource. The `backupStorageProvider` section captures the place where all Ark backups should be stored. This is defined by a **provider** (e.g. `aws`, `azure`, `gcp`, `minio`, etc.), a **bucket**, and possibly some additional provider-specific settings (e.g. `region`). Similarly, the `persistentVolumeProvider` section captures the place where all persistent volume snapshots taken as part of Ark backups should be stored, and is defined by a **provider** and additional provider-specific settings (e.g. `region`).
+In Velero versions prior to v0.10, the configuration for where to store backups & volume snapshots is specified in a `Config` custom resource. The `backupStorageProvider` section captures the place where all Velero backups should be stored. This is defined by a **provider** (e.g. `aws`, `azure`, `gcp`, `minio`, etc.), a **bucket**, and possibly some additional provider-specific settings (e.g. `region`). Similarly, the `persistentVolumeProvider` section captures the place where all persistent volume snapshots taken as part of Velero backups should be stored, and is defined by a **provider** and additional provider-specific settings (e.g. `region`).

 There are a number of use cases that this basic design does not support, such as:

-- Take snapshots of more than one kind of persistent volume in a single Ark backup (e.g. in a cluster with both EBS volumes and Portworx volumes)
-- Have some Ark backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region
+- Take snapshots of more than one kind of persistent volume in a single Velero backup (e.g. in a cluster with both EBS volumes and Portworx volumes)
+- Have some Velero backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region
 - For volume providers that support it (e.g. Portworx), have some snapshots be stored locally on the cluster and have others be stored in the cloud

-Additionally, as we look ahead to backup replication, a major feature on our roadmap, we know that we'll need Ark to be able to support multiple possible storage locations.
+Additionally, as we look ahead to backup replication, a major feature on our roadmap, we know that we'll need Velero to be able to support multiple possible storage locations.

 ## Overview

-In Ark v0.10 we got rid of the `Config` custom resource, and replaced it with two new custom resources, `BackupStorageLocation` and `VolumeSnapshotLocation`. The new resources directly replace the legacy `backupStorageProvider` and `persistentVolumeProvider` sections of the `Config` resource, respectively.
+In Velero v0.10 we got rid of the `Config` custom resource, and replaced it with two new custom resources, `BackupStorageLocation` and `VolumeSnapshotLocation`. The new resources directly replace the legacy `backupStorageProvider` and `persistentVolumeProvider` sections of the `Config` resource, respectively.

 Now, the user can pre-define more than one possible `BackupStorageLocation` and more than one `VolumeSnapshotLocation`, and can select *at backup creation time* the location in which the backup and associated snapshots should be stored.

-A `BackupStorageLocation` is defined as a bucket, a prefix within that bucket under which all Ark data should be stored, and a set of additional provider-specific fields (e.g. AWS region, Azure storage account, etc.) The [API documentation][1] captures the configurable parameters for each in-tree provider.
+A `BackupStorageLocation` is defined as a bucket, a prefix within that bucket under which all Velero data should be stored, and a set of additional provider-specific fields (e.g. AWS region, Azure storage account, etc.) The [API documentation][1] captures the configurable parameters for each in-tree provider.

 A `VolumeSnapshotLocation` is defined entirely by provider-specific fields (e.g. AWS region, Azure resource group, Portworx snapshot type, etc.) The [API documentation][2] captures the configurable parameters for each in-tree provider.
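For reference, a pair of location resources might look roughly like the following YAML. This is a sketch only: the `spec` layout mirrors the v0.10 in-tree AWS examples, the bucket and resource names are placeholders, and the linked API documentation remains the authoritative schema.

```yaml
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: aws
  objectStorage:
    bucket: velero-backups   # placeholder bucket name
    prefix: cluster-1        # optional prefix within the bucket
  config:
    region: us-east-1
---
apiVersion: velero.io/v1
kind: VolumeSnapshotLocation
metadata:
  name: ebs-us-east-1
  namespace: velero
spec:
  provider: aws
  config:
    region: us-east-1
```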
-Additionally, since multiple `VolumeSnapshotLocations` can be created, the user can now configure locations for more than one volume provider, and if the cluster has volumes from multiple providers (e.g. AWS EBS and Portworx), all of them can be snapshotted in a single Ark backup.
+Additionally, since multiple `VolumeSnapshotLocations` can be created, the user can now configure locations for more than one volume provider, and if the cluster has volumes from multiple providers (e.g. AWS EBS and Portworx), all of them can be snapshotted in a single Velero backup.

 ## Limitations / Caveats

-- Volume snapshots are still limited by where your provider allows you to create snapshots. For example, AWS and Azure do not allow you to create a volume snapshot in a different region than where the volume is. If you try to take an Ark backup using a volume snapshot location with a different region than where your cluster's volumes are, the backup will fail.
+- Volume snapshots are still limited by where your provider allows you to create snapshots. For example, AWS and Azure do not allow you to create a volume snapshot in a different region than where the volume is. If you try to take a Velero backup using a volume snapshot location with a different region than where your cluster's volumes are, the backup will fail.

-- Each Ark backup has one `BackupStorageLocation`, and one `VolumeSnapshotLocation` per volume provider. It is not possible (yet) to send a single Ark backup to multiple backup storage locations simultaneously, or a single volume snapshot to multiple locations simultaneously. However, you can always set up multiple scheduled backups that differ only in the storage locations used if redundancy of backups across locations is important.
+- Each Velero backup has one `BackupStorageLocation`, and one `VolumeSnapshotLocation` per volume provider. It is not possible (yet) to send a single Velero backup to multiple backup storage locations simultaneously, or a single volume snapshot to multiple locations simultaneously. However, you can always set up multiple scheduled backups that differ only in the storage locations used if redundancy of backups across locations is important.

-- Cross-provider snapshots are not supported. If you have a cluster with more than one type of volume (e.g. EBS and Portworx), but you only have a `VolumeSnapshotLocation` configured for EBS, then Ark will **only** snapshot the EBS volumes.
+- Cross-provider snapshots are not supported. If you have a cluster with more than one type of volume (e.g. EBS and Portworx), but you only have a `VolumeSnapshotLocation` configured for EBS, then Velero will **only** snapshot the EBS volumes.

-- Restic data is now stored under a prefix/subdirectory of the main Ark bucket, and will go into the bucket corresponding to the `BackupStorageLocation` selected by the user at backup creation time.
+- Restic data is now stored under a prefix/subdirectory of the main Velero bucket, and will go into the bucket corresponding to the `BackupStorageLocation` selected by the user at backup creation time.

 ## Examples

 Let's look at some examples of how we can use this new mechanism to address each of our previously unsupported use cases:

-#### Take snapshots of more than one kind of persistent volume in a single Ark backup (e.g. in a cluster with both EBS volumes and Portworx volumes)
+#### Take snapshots of more than one kind of persistent volume in a single Velero backup (e.g. in a cluster with both EBS volumes and Portworx volumes)

 During server configuration:

 ```shell
-ark snapshot-location create ebs-us-east-1 \
+velero snapshot-location create ebs-us-east-1 \
     --provider aws \
     --config region=us-east-1

-ark snapshot-location create portworx-cloud \
+velero snapshot-location create portworx-cloud \
     --provider portworx \
     --config type=cloud
 ```
@@ -57,43 +57,43 @@ ark snapshot-location create portworx-cloud \
 During backup creation:

 ```shell
-ark backup create full-cluster-backup \
+velero backup create full-cluster-backup \
     --volume-snapshot-locations ebs-us-east-1,portworx-cloud
 ```

-Alternately, since in this example there's only one possible volume snapshot location configured for each of our two providers (`ebs-us-east-1` for `aws`, and `portworx-cloud` for `portworx`), Ark doesn't require them to be explicitly specified when creating the backup:
+Alternately, since in this example there's only one possible volume snapshot location configured for each of our two providers (`ebs-us-east-1` for `aws`, and `portworx-cloud` for `portworx`), Velero doesn't require them to be explicitly specified when creating the backup:

 ```shell
-ark backup create full-cluster-backup
+velero backup create full-cluster-backup
 ```

-#### Have some Ark backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region
+#### Have some Velero backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region

 During server configuration:

 ```shell
-ark backup-location create default \
+velero backup-location create default \
     --provider aws \
-    --bucket ark-backups \
+    --bucket velero-backups \
     --config region=us-east-1

-ark backup-location create s3-alt-region \
+velero backup-location create s3-alt-region \
     --provider aws \
-    --bucket ark-backups-alt \
+    --bucket velero-backups-alt \
     --config region=us-west-1
 ```

 During backup creation:
 ```shell
-# The Ark server will automatically store backups in the backup storage location named "default" if
+# The Velero server will automatically store backups in the backup storage location named "default" if
 # one is not specified when creating the backup. You can alter which backup storage location is used
-# by default by setting the --default-backup-storage-location flag on the `ark server` command (run
-# by the Ark deployment) to the name of a different backup storage location.
-ark backup create full-cluster-backup
+# by default by setting the --default-backup-storage-location flag on the `velero server` command (run
+# by the Velero deployment) to the name of a different backup storage location.
+velero backup create full-cluster-backup
 ```
 Or:
 ```shell
-ark backup create full-cluster-alternate-location-backup \
+velero backup create full-cluster-alternate-location-backup \
     --storage-location s3-alt-region
 ```

@@ -102,11 +102,11 @@ ark backup create full-cluster-alternate-location-backup \
 During server configuration:

 ```shell
-ark snapshot-location create portworx-local \
+velero snapshot-location create portworx-local \
     --provider portworx \
     --config type=local

-ark snapshot-location create portworx-cloud \
+velero snapshot-location create portworx-cloud \
     --provider portworx \
     --config type=cloud
 ```
@@ -116,49 +116,49 @@ During backup creation:
 ```shell
 # Note that since in this example we have two possible volume snapshot locations for the Portworx
 # provider, we need to explicitly specify which one to use when creating a backup. Alternately,
-# you can set the --default-volume-snapshot-locations flag on the `ark server` command (run by
-# the Ark deployment) to specify which location should be used for each provider by default, in
+# you can set the --default-volume-snapshot-locations flag on the `velero server` command (run by
+# the Velero deployment) to specify which location should be used for each provider by default, in
 # which case you don't need to specify it when creating a backup.
-ark backup create local-snapshot-backup \
+velero backup create local-snapshot-backup \
     --volume-snapshot-locations portworx-local
 ```

 Or:

 ```shell
-ark backup create cloud-snapshot-backup \
+velero backup create cloud-snapshot-backup \
     --volume-snapshot-locations portworx-cloud
 ```

 #### One location is still easy

-If you don't have a use case for more than one location, it's still just as easy to use Ark. Let's assume you're running on AWS, in the `us-west-1` region:
+If you don't have a use case for more than one location, it's still just as easy to use Velero. Let's assume you're running on AWS, in the `us-west-1` region:

 During server configuration:

 ```shell
-ark backup-location create default \
+velero backup-location create default \
     --provider aws \
-    --bucket ark-backups \
+    --bucket velero-backups \
     --config region=us-west-1

-ark snapshot-location create ebs-us-west-1 \
+velero snapshot-location create ebs-us-west-1 \
     --provider aws \
     --config region=us-west-1
 ```

 During backup creation:
 ```shell
-# Ark's will automatically use your configured backup storage location and volume snapshot location.
+# Velero will automatically use your configured backup storage location and volume snapshot location.
 # Nothing new needs to be specified when creating a backup.
-ark backup create full-cluster-backup
+velero backup create full-cluster-backup
 ```

 ## Additional Use Cases

 1. If you're using Azure's AKS, you may want to store your volume snapshots outside of the "infrastructure" resource group that is automatically created when you create your AKS cluster. This is now possible using a `VolumeSnapshotLocation`, by specifying a `resourceGroup` under the `config` section of the snapshot location. See the [Azure volume snapshot location documentation][3] for details.

-1. If you're using Azure, you may want to store your Ark backups across multiple storage accounts and/or resource groups. This is now possible using a `BackupStorageLocation`, by specifying a `storageAccount` and/or `resourceGroup`, respectively, under the `config` section of the backup location. See the [Azure backup storage location documentation][4] for details.
+1. If you're using Azure, you may want to store your Velero backups across multiple storage accounts and/or resource groups. This is now possible using a `BackupStorageLocation`, by specifying a `storageAccount` and/or `resourceGroup`, respectively, under the `config` section of the backup location. See the [Azure backup storage location documentation][4] for details.
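A sketch of the AKS case above, assuming only that `resourceGroup` is set under `config` as described; names are placeholders, and the Azure volume snapshot location documentation lists the full set of keys:

```yaml
apiVersion: velero.io/v1
kind: VolumeSnapshotLocation
metadata:
  name: azure-custom-rg
  namespace: velero
spec:
  provider: azure
  config:
    # Direct snapshots into a specific resource group instead of the
    # AKS-managed "infrastructure" resource group.
    resourceGroup: my-snapshot-rg
```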

@@ -2,31 +2,31 @@

 *Using Backups and Restores*

-Heptio Ark can help you port your resources from one cluster to another, as long as you point each Ark instance to the same cloud object storage location. In this scenario, we are also assuming that your clusters are hosted by the same cloud provider. **Note that Heptio Ark does not support the migration of persistent volumes across cloud providers.**
+Velero can help you port your resources from one cluster to another, as long as you point each Velero instance to the same cloud object storage location. In this scenario, we are also assuming that your clusters are hosted by the same cloud provider. **Note that Velero does not support the migration of persistent volumes across cloud providers.**

-1. *(Cluster 1)* Assuming you haven't already been checkpointing your data with the Ark `schedule` operation, you need to first back up your entire cluster (replacing `<BACKUP-NAME>` as desired):
+1. *(Cluster 1)* Assuming you haven't already been checkpointing your data with the Velero `schedule` operation, you need to first back up your entire cluster (replacing `<BACKUP-NAME>` as desired):

    ```
-   ark backup create <BACKUP-NAME>
+   velero backup create <BACKUP-NAME>
    ```

    The default TTL is 30 days (720 hours); you can use the `--ttl` flag to change this as necessary.

-1. *(Cluster 2)* Add the `--restore-only` flag to the server spec in the Ark deployment YAML.
+1. *(Cluster 2)* Add the `--restore-only` flag to the server spec in the Velero deployment YAML.
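A sketch of that edit, using the container layout from the example Velero deployment; only the relevant fields of the pod template are shown:

```yaml
spec:
  containers:
  - name: velero
    image: gcr.io/heptio-images/velero:latest
    command:
    - /velero
    args:
    - server
    - --restore-only   # add this flag on Cluster 2
```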

-1. *(Cluster 2)* Make sure that the `BackupStorageLocation` and `VolumeSnapshotLocation` CRDs match the ones from *Cluster 1*, so that your new Ark server instance points to the same bucket.
+1. *(Cluster 2)* Make sure that the `BackupStorageLocation` and `VolumeSnapshotLocation` CRDs match the ones from *Cluster 1*, so that your new Velero server instance points to the same bucket.

-1. *(Cluster 2)* Make sure that the Ark Backup object is created. Ark resources are synchronized with the backup files in cloud storage.
+1. *(Cluster 2)* Make sure that the Velero Backup object is created. Velero resources are synchronized with the backup files in cloud storage.

    ```
-   ark backup describe <BACKUP-NAME>
+   velero backup describe <BACKUP-NAME>
    ```

-   **Note:** As of version 0.10, the default sync interval is 1 minute, so make sure to wait before checking. You can configure this interval with the `--backup-sync-period` flag to the Ark server.
+   **Note:** As of version 0.10, the default sync interval is 1 minute, so make sure to wait before checking. You can configure this interval with the `--backup-sync-period` flag to the Velero server.

 1. *(Cluster 2)* Once you have confirmed that the right Backup (`<BACKUP-NAME>`) is now present, you can restore everything with:

    ```
-   ark restore create --from-backup <BACKUP-NAME>
+   velero restore create --from-backup <BACKUP-NAME>
    ```

 ## Verify both clusters

@@ -36,13 +36,13 @@ Check that the second cluster is behaving as expected:
 1. *(Cluster 2)* Run:

    ```
-   ark restore get
+   velero restore get
    ```

 1. Then run:

    ```
-   ark restore describe <RESTORE-NAME-FROM-GET-COMMAND>
+   velero restore describe <RESTORE-NAME-FROM-GET-COMMAND>
    ```

-If you encounter issues, make sure that Ark is running in the same namespace in both clusters.
+If you encounter issues, make sure that Velero is running in the same namespace in both clusters.

@@ -1,38 +1,38 @@
 # Run in custom namespace

-In Ark version 0.7.0 and later, you can run Ark in any namespace. To do so, you specify the
-namespace in the YAML files that configure the Ark server. You then also specify the namespace when
-you run Ark client commands.
+In Velero version 0.7.0 and later, you can run Velero in any namespace. To do so, you specify the
+namespace in the YAML files that configure the Velero server. You then also specify the namespace when
+you run Velero client commands.

 ## Edit the example files

-The Ark release tarballs include a set of example configs that you can use to set up your Ark server. The
-examples place the server and backup/schedule/restore/etc. data in the `heptio-ark` namespace.
+The Velero release tarballs include a set of example configs that you can use to set up your Velero server. The
+examples place the server and backup/schedule/restore/etc. data in the `velero` namespace.

-To run the server in another namespace, you edit the relevant files, changing `heptio-ark` to
+To run the server in another namespace, you edit the relevant files, changing `velero` to
 your desired namespace.

 To store your backups, schedules, restores, and config in another namespace, you edit the relevant
-files, changing `heptio-ark` to your desired namespace. You also need to create the
+files, changing `velero` to your desired namespace. You also need to create the
 `cloud-credentials` secret in your desired namespace.

 First, ensure you've [downloaded & extracted the latest release][0].

 For all cloud providers, edit `config/common/00-prereqs.yaml`. This file defines:

-* CustomResourceDefinitions for the Ark objects (backups, schedules, restores, downloadrequests, etc.)
-* The namespace where the Ark server runs
+* CustomResourceDefinitions for the Velero objects (backups, schedules, restores, downloadrequests, etc.)
+* The namespace where the Velero server runs
 * The namespace where backups, schedules, restores, etc. are stored
-* The Ark service account
-* The RBAC rules to grant permissions to the Ark service account
+* The Velero service account
+* The RBAC rules to grant permissions to the Velero service account
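As a sketch of the kind of edit involved, the namespace and service account entries from `00-prereqs.yaml` would change along these lines, using a hypothetical `backup-system` namespace:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: backup-system        # was: velero
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: velero
  namespace: backup-system   # was: velero
```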

 ### AWS

 For AWS, edit:

-* `config/aws/05-ark-backupstoragelocation.yaml`
-* `config/aws/06-ark-volumesnapshotlocation.yaml`
+* `config/aws/05-backupstoragelocation.yaml`
+* `config/aws/06-volumesnapshotlocation.yaml`
 * `config/aws/10-deployment.yaml`

@@ -40,16 +40,16 @@ For AWS, edit:
 For Azure, edit:

-* `config/azure/00-ark-deployment.yaml`
-* `config/azure/05-ark-backupstoragelocation.yaml`
-* `config/azure/06-ark-volumesnapshotlocation.yaml`
+* `config/azure/00-deployment.yaml`
+* `config/azure/05-backupstoragelocation.yaml`
+* `config/azure/06-volumesnapshotlocation.yaml`

 ### GCP

 For GCP, edit:

-* `config/gcp/05-ark-backupstoragelocation.yaml`
-* `config/gcp/06-ark-volumesnapshotlocation.yaml`
+* `config/gcp/05-backupstoragelocation.yaml`
+* `config/gcp/06-volumesnapshotlocation.yaml`
 * `config/gcp/10-deployment.yaml`

@@ -57,16 +57,16 @@ For GCP, edit:
 For IBM, edit:

-* `config/ibm/05-ark-backupstoragelocation.yaml`
+* `config/ibm/05-backupstoragelocation.yaml`
 * `config/ibm/10-deployment.yaml`

 ## Specify the namespace in client commands

-To specify the namespace for all Ark client commands, run:
+To specify the namespace for all Velero client commands, run:

 ```
-ark client config set namespace=<NAMESPACE_VALUE>
+velero client config set namespace=<NAMESPACE_VALUE>
 ```

@@ -1,15 +1,15 @@
 # Output file format

-A backup is a gzip-compressed tar file whose name matches the Backup API resource's `metadata.name` (what is specified during `ark backup create <NAME>`).
+A backup is a gzip-compressed tar file whose name matches the Backup API resource's `metadata.name` (what is specified during `velero backup create <NAME>`).

-In cloud object storage, each backup file is stored in its own subdirectory in the bucket specified in the Ark server configuration. This subdirectory includes an additional file called `ark-backup.json`. The JSON file lists all information about your associated Backup resource, including any default values. This gives you a complete historical record of the backup configuration. The JSON file also specifies `status.version`, which corresponds to the output file format.
+In cloud object storage, each backup file is stored in its own subdirectory in the bucket specified in the Velero server configuration. This subdirectory includes an additional file called `velero-backup.json`. The JSON file lists all information about your associated Backup resource, including any default values. This gives you a complete historical record of the backup configuration. The JSON file also specifies `status.version`, which corresponds to the output file format.

 The directory structure in your cloud storage looks something like:

 ```
 rootBucket/
     backup1234/
-        ark-backup.json
+        velero-backup.json
         backup1234.tar.gz
 ```

@@ -18,11 +18,11 @@ rootBucket/
 ```json
 {
     "kind": "Backup",
-    "apiVersion": "ark.heptio.com/v1",
+    "apiVersion": "velero.io/v1",
     "metadata": {
         "name": "test-backup",
-        "namespace": "heptio-ark",
-        "selfLink": "/apis/ark.heptio.com/v1/namespaces/heptio-ark/backups/testtest",
+        "namespace": "velero",
+        "selfLink": "/apis/velero.io/v1/namespaces/velero/backups/testtest",
         "uid": "a12345cb-75f5-11e7-b4c2-abcdef123456",
         "resourceVersion": "337075",
         "creationTimestamp": "2017-07-31T13:39:15Z"

@@ -1,10 +1,10 @@
 # Plugins

-Heptio Ark has a plugin architecture that allows users to add their own custom functionality to Ark backups & restores
-without having to modify/recompile the core Ark binary. To add custom functionality, users simply create their own binary
-containing implementations of Ark's plugin kinds (described below), plus a small amount of boilerplate code to
-expose the plugin implementations to Ark. This binary is added to a container image that serves as an init container for
-the Ark server pod and copies the binary into a shared emptyDir volume for the Ark server to access.
+Velero has a plugin architecture that allows users to add their own custom functionality to Velero backups & restores
+without having to modify/recompile the core Velero binary. To add custom functionality, users simply create their own binary
+containing implementations of Velero's plugin kinds (described below), plus a small amount of boilerplate code to
+expose the plugin implementations to Velero. This binary is added to a container image that serves as an init container for
+the Velero server pod and copies the binary into a shared emptyDir volume for the Velero server to access.
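A sketch of that wiring, assuming a hypothetical plugin image; the exact volume name and mount path in your deployment may differ:

```yaml
spec:
  initContainers:
  - name: my-plugin
    image: example.com/my-velero-plugin:latest   # hypothetical image
    volumeMounts:
    - name: plugins
      mountPath: /target     # the init container copies its binary here
  volumes:
  - name: plugins
    emptyDir: {}             # shared with the Velero server container
```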

 Multiple plugins, of any type, can be implemented in this binary.

@@ -12,7 +12,7 @@ A fully-functional [sample plugin repository][1] is provided to serve as a conve
 ## Plugin Kinds

-Ark currently supports the following kinds of plugins:
+Velero currently supports the following kinds of plugins:

 - **Object Store** - persists and retrieves backups, backup logs and restore logs
 - **Block Store** - creates volume snapshots (during backup) and restores volumes from snapshots (during restore)
@@ -21,11 +21,11 @@ Ark currently supports the following kinds of plugins:
 ## Plugin Logging

-Ark provides a [logger][2] that can be used by plugins to log structured information to the main Ark server log or
+Velero provides a [logger][2] that can be used by plugins to log structured information to the main Velero server log or
 per-backup/restore logs. See the [sample repository][1] for an example of how to instantiate and use the logger
 within your plugin.

-[1]: https://github.com/heptio/ark-plugin-example
-[2]: https://github.com/heptio/ark/blob/master/pkg/plugin/logger.go
+[1]: https://github.com/heptio/velero-plugin-example
+[2]: https://github.com/heptio/velero/blob/master/pkg/plugin/logger.go

docs/rbac.md
@@ -1,6 +1,6 @@
-# Run Ark more securely with restrictive RBAC settings
+# Run Velero more securely with restrictive RBAC settings

-By default Ark runs with an RBAC policy of ClusterRole `cluster-admin`. This is to make sure that Ark can back up or restore anything in your cluster. But `cluster-admin` access is wide open -- it gives Ark components access to everything in your cluster. Depending on your environment and your security needs, you should consider whether to configure additional RBAC policies with more restrictive access.
+By default Velero runs with an RBAC policy of ClusterRole `cluster-admin`. This is to make sure that Velero can back up or restore anything in your cluster. But `cluster-admin` access is wide open -- it gives Velero components access to everything in your cluster. Depending on your environment and your security needs, you should consider whether to configure additional RBAC policies with more restrictive access.

 **Note:** Roles and RoleBindings are associated with a single namespace, not with an entire cluster. PersistentVolume backups are associated only with an entire cluster. This means that any backups or restores that use a restrictive Role and RoleBinding pair can manage only the resources that belong to the namespace. You do not need a wide open RBAC policy to manage PersistentVolumes, however. You can configure a ClusterRole and ClusterRoleBinding that allow backups and restores only of PersistentVolumes, not of all objects in the cluster.
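For example, a ClusterRole scoped to PersistentVolumes only might look roughly like this; names are placeholders:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: velero-pv-only
  labels:
    component: velero
rules:
- apiGroups:
  - ""                  # core API group, where PersistentVolumes live
  resources:
  - persistentvolumes
  verbs:
  - "*"
```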

@@ -17,10 +17,10 @@ metadata:
   namespace: YOUR_NAMESPACE_HERE
   name: ROLE_NAME_HERE
   labels:
-    component: ark
+    component: velero
 rules:
 - apiGroups:
-  - ark.heptio.com
+  - velero.io
   verbs:
   - "*"
   resources:
@@ -44,4 +44,4 @@ roleRef:
 [1]: https://kubernetes.io/docs/reference/access-authn-authz/controlling-access/
 [2]: https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/
 [3]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/
-[4]: namespace.md
+[4]: namespace.md

docs/restic.md
@@ -1,16 +1,16 @@
 # Restic Integration

-As of version 0.9.0, Ark has support for backing up and restoring Kubernetes volumes using a free open-source backup tool called
+As of version 0.9.0, Velero has support for backing up and restoring Kubernetes volumes using a free open-source backup tool called
 [restic][1].

-Ark has always allowed you to take snapshots of persistent volumes as part of your backups if you’re using one of
+Velero has always allowed you to take snapshots of persistent volumes as part of your backups if you’re using one of
 the supported cloud providers’ block storage offerings (Amazon EBS Volumes, Azure Managed Disks, Google Persistent Disks).
 Starting with version 0.6.0, we provide a plugin model that enables anyone to implement additional object and block storage
-backends, outside the main Ark repository.
+backends, outside the main Velero repository.

-We integrated restic with Ark so that users have an out-of-the-box solution for backing up and restoring almost any type of Kubernetes
-volume*. This is a new capability for Ark, not a replacement for existing functionality. If you're running on AWS, and
-taking EBS snapshots as part of your regular Ark backups, there's no need to switch to using restic. However, if you've
+We integrated restic with Velero so that users have an out-of-the-box solution for backing up and restoring almost any type of Kubernetes
+volume*. This is a new capability for Velero, not a replacement for existing functionality. If you're running on AWS, and
+taking EBS snapshots as part of your regular Velero backups, there's no need to switch to using restic. However, if you've
 been waiting for a snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir,
 local, or any other volume type that doesn't have a native snapshot concept, restic might be for you.

@@ -23,16 +23,16 @@ cross-volume-type data migrations. Stay tuned as this evolves!
 ### Prerequisites

-- A working install of Ark version 0.10.0 or later. See [Set up Ark][2]
-- A local clone of [the latest release tag of the Ark repository][3]
-- Ark's restic integration requires the Kubernetes [MountPropagation feature][6], which is enabled by default in Kubernetes v1.10.0 and later.
+- A working install of Velero version 0.10.0 or later. See [Set up Velero][2]
+- A local clone of [the latest release tag of the Velero repository][3]
+- Velero's restic integration requires the Kubernetes [MountPropagation feature][6], which is enabled by default in Kubernetes v1.10.0 and later.

 ### Instructions

 1. Ensure you've [downloaded & extracted the latest release][3].

-1. In the Ark directory (i.e. where you extracted the release tarball), run the following to create new custom resource definitions:
+1. In the Velero directory (i.e. where you extracted the release tarball), run the following to create new custom resource definitions:

    ```bash
    kubectl apply -f config/common/00-prereqs.yaml
@@ -45,14 +45,14 @@ cross-volume-type data migrations. Stay tuned as this evolves!
    - GCP: `kubectl apply -f config/gcp/20-restic-daemonset.yaml`
    - Minio: `kubectl apply -f config/minio/30-restic-daemonset.yaml`

-You're now ready to use Ark with restic.
+You're now ready to use Velero with restic.

 ## Back up

 1. Run the following for each pod that contains a volume to back up:

    ```bash
-   kubectl -n YOUR_POD_NAMESPACE annotate pod/YOUR_POD_NAME backup.ark.heptio.com/backup-volumes=YOUR_VOLUME_NAME_1,YOUR_VOLUME_NAME_2,...
+   kubectl -n YOUR_POD_NAMESPACE annotate pod/YOUR_POD_NAME backup.velero.io/backup-volumes=YOUR_VOLUME_NAME_1,YOUR_VOLUME_NAME_2,...
    ```

    where the volume names are the names of the volumes in the pod spec.

@@ -84,91 +84,91 @@ You're now ready to use Ark with restic.
    You'd run:
    ```bash
-   kubectl -n foo annotate pod/sample backup.ark.heptio.com/backup-volumes=pvc-volume,emptydir-volume
+   kubectl -n foo annotate pod/sample backup.velero.io/backup-volumes=pvc-volume,emptydir-volume
    ```

    This annotation can also be provided in a pod template spec if you use a controller to manage your pods.
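A sketch of that approach, wrapping the `sample` pod from the example above in a Deployment; only the annotation is significant, the rest is scaffolding with placeholder names:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sample
  namespace: foo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
      annotations:
        # Every pod created from this template carries the annotation,
        # so its volumes are picked up by Velero's restic integration.
        backup.velero.io/backup-volumes: pvc-volume,emptydir-volume
    spec:
      containers:
      - name: app
        image: nginx             # placeholder image
        volumeMounts:
        - name: pvc-volume
          mountPath: /volume-1
        - name: emptydir-volume
          mountPath: /volume-2
      volumes:
      - name: pvc-volume
        persistentVolumeClaim:
          claimName: sample-pvc  # hypothetical claim name
      - name: emptydir-volume
        emptyDir: {}
```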

-1. Take an Ark backup:
+1. Take a Velero backup:

    ```bash
-   ark backup create NAME OPTIONS...
+   velero backup create NAME OPTIONS...
    ```

 1. When the backup completes, view information about the backups:

    ```bash
-   ark backup describe YOUR_BACKUP_NAME
+   velero backup describe YOUR_BACKUP_NAME

-   kubectl -n heptio-ark get podvolumebackups -l ark.heptio.com/backup-name=YOUR_BACKUP_NAME -o yaml
+   kubectl -n velero get podvolumebackups -l velero.io/backup-name=YOUR_BACKUP_NAME -o yaml
    ```

 ## Restore

-1. Restore from your Ark backup:
+1. Restore from your Velero backup:

    ```bash
-   ark restore create --from-backup BACKUP_NAME OPTIONS...
+   velero restore create --from-backup BACKUP_NAME OPTIONS...
    ```

 1. When the restore completes, view information about your pod volume restores:

    ```bash
-   ark restore describe YOUR_RESTORE_NAME
+   velero restore describe YOUR_RESTORE_NAME

-   kubectl -n heptio-ark get podvolumerestores -l ark.heptio.com/restore-name=YOUR_RESTORE_NAME -o yaml
+   kubectl -n velero get podvolumerestores -l velero.io/restore-name=YOUR_RESTORE_NAME -o yaml
    ```

 ## Limitations

 - `hostPath` volumes are not supported. [Local persistent volumes][4] are supported.
 - Those of you familiar with [restic][1] may know that it encrypts all of its data. We've decided to use a static,
-common encryption key for all restic repositories created by Ark. **This means that anyone who has access to your
+common encryption key for all restic repositories created by Velero. **This means that anyone who has access to your
 bucket can decrypt your restic backup data**. Make sure that you limit access to the restic bucket
-appropriately. We plan to implement full Ark backup encryption, including securing the restic encryption keys, in
+appropriately. We plan to implement full Velero backup encryption, including securing the restic encryption keys, in
 a future release.

 ## Troubleshooting

 Run the following checks:

-Are your Ark server and daemonset pods running?
+Are your Velero server and daemonset pods running?

 ```bash
-kubectl get pods -n heptio-ark
+kubectl get pods -n velero
 ```

 Does your restic repository exist, and is it ready?

 ```bash
-ark restic repo get
+velero restic repo get

-ark restic repo get REPO_NAME -o yaml
+velero restic repo get REPO_NAME -o yaml
 ```

-Are there any errors in your Ark backup/restore?
+Are there any errors in your Velero backup/restore?

 ```bash
-ark backup describe BACKUP_NAME
-ark backup logs BACKUP_NAME
+velero backup describe BACKUP_NAME
+velero backup logs BACKUP_NAME

-ark restore describe RESTORE_NAME
-ark restore logs RESTORE_NAME
+velero restore describe RESTORE_NAME
+velero restore logs RESTORE_NAME
 ```

 What is the status of your pod volume backups/restores?

 ```bash
-kubectl -n heptio-ark get podvolumebackups -l ark.heptio.com/backup-name=BACKUP_NAME -o yaml
+kubectl -n velero get podvolumebackups -l velero.io/backup-name=BACKUP_NAME -o yaml

-kubectl -n heptio-ark get podvolumerestores -l ark.heptio.com/restore-name=RESTORE_NAME -o yaml
+kubectl -n velero get podvolumerestores -l velero.io/restore-name=RESTORE_NAME -o yaml
 ```

-Is there any useful information in the Ark server or daemon pod logs?
+Is there any useful information in the Velero server or daemon pod logs?

 ```bash
-kubectl -n heptio-ark logs deploy/ark
-kubectl -n heptio-ark logs DAEMON_POD_NAME
+kubectl -n velero logs deploy/velero
+kubectl -n velero logs DAEMON_POD_NAME
 ```

 **NOTE**: You can increase the verbosity of the pod logs by adding `--log-level=debug` as an argument
@@ -178,71 +178,71 @@ to the container command in the deployment/daemonset pod template spec.
 We introduced three custom resource definitions and associated controllers:

-- `ResticRepository` - represents/manages the lifecycle of Ark's [restic repositories][5]. Ark creates
+- `ResticRepository` - represents/manages the lifecycle of Velero's [restic repositories][5]. Velero creates
   a restic repository per namespace when the first restic backup for a namespace is requested. The controller
   for this custom resource executes restic repository lifecycle commands -- `restic init`, `restic check`,
   and `restic prune`.

-  You can see information about your Ark restic repositories by running `ark restic repo get`.
+  You can see information about your Velero restic repositories by running `velero restic repo get`.

-- `PodVolumeBackup` - represents a restic backup of a volume in a pod. The main Ark backup process creates
+- `PodVolumeBackup` - represents a restic backup of a volume in a pod. The main Velero backup process creates
   one or more of these when it finds an annotated pod. Each node in the cluster runs a controller for this
   resource (in a daemonset) that handles the `PodVolumeBackups` for pods on that node. The controller executes
   `restic backup` commands to backup pod volume data.

-- `PodVolumeRestore` - represents a restic restore of a pod volume. The main Ark restore process creates one
+- `PodVolumeRestore` - represents a restic restore of a pod volume. The main Velero restore process creates one
   or more of these when it encounters a pod that has associated restic backups. Each node in the cluster runs a
   controller for this resource (in the same daemonset as above) that handles the `PodVolumeRestores` for pods
   on that node. The controller executes `restic restore` commands to restore pod volume data.

 ### Backup

-1. The main Ark backup process checks each pod that it's backing up for the annotation specifying a restic backup
-should be taken (`backup.ark.heptio.com/backup-volumes`)
-1. When found, Ark first ensures a restic repository exists for the pod's namespace, by:
+1. The main Velero backup process checks each pod that it's backing up for the annotation specifying a restic backup
+should be taken (`backup.velero.io/backup-volumes`)
+1. When found, Velero first ensures a restic repository exists for the pod's namespace, by:
     - checking if a `ResticRepository` custom resource already exists
     - if not, creating a new one, and waiting for the `ResticRepository` controller to init/check it
-1. Ark then creates a `PodVolumeBackup` custom resource per volume listed in the pod annotation
-1. The main Ark process now waits for the `PodVolumeBackup` resources to complete or fail
+1. Velero then creates a `PodVolumeBackup` custom resource per volume listed in the pod annotation
+1. The main Velero process now waits for the `PodVolumeBackup` resources to complete or fail
 1. Meanwhile, each `PodVolumeBackup` is handled by the controller on the appropriate node, which:
     - has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data
    - finds the pod volume's subdirectory within the above volume
    - runs `restic backup`
    - updates the status of the custom resource to `Completed` or `Failed`
-1. As each `PodVolumeBackup` finishes, the main Ark process captures its restic snapshot ID and adds it as an annotation
-to the copy of the pod JSON that's stored in the Ark backup. This will be used for restores, as seen in the next section.
+1. As each `PodVolumeBackup` finishes, the main Velero process captures its restic snapshot ID and adds it as an annotation
+to the copy of the pod JSON that's stored in the Velero backup. This will be used for restores, as seen in the next section.

 ### Restore

-1. The main Ark restore process checks each pod that it's restoring for annotations specifying a restic backup
-exists for a volume in the pod (`snapshot.ark.heptio.com/<volume-name>`)
-1. When found, Ark first ensures a restic repository exists for the pod's namespace, by:
+1. The main Velero restore process checks each pod that it's restoring for annotations specifying a restic backup
+exists for a volume in the pod (`snapshot.velero.io/<volume-name>`)
+1. When found, Velero first ensures a restic repository exists for the pod's namespace, by:
    - checking if a `ResticRepository` custom resource already exists
    - if not, creating a new one, and waiting for the `ResticRepository` controller to init/check it (note that
-in this case, the actual repository should already exist in object storage, so the Ark controller will simply
+in this case, the actual repository should already exist in object storage, so the Velero controller will simply
 check it for integrity)
-1. Ark adds an init container to the pod, whose job is to wait for all restic restores for the pod to complete (more
+1. Velero adds an init container to the pod, whose job is to wait for all restic restores for the pod to complete (more
 on this shortly)
-1. Ark creates the pod, with the added init container, by submitting it to the Kubernetes API
-1. Ark creates a `PodVolumeRestore` custom resource for each volume to be restored in the pod
-1. The main Ark process now waits for each `PodVolumeRestore` resource to complete or fail
+1. Velero creates the pod, with the added init container, by submitting it to the Kubernetes API
+1. Velero creates a `PodVolumeRestore` custom resource for each volume to be restored in the pod
+1. The main Velero process now waits for each `PodVolumeRestore` resource to complete or fail
 1. Meanwhile, each `PodVolumeRestore` is handled by the controller on the appropriate node, which:
    - has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data
    - waits for the pod to be running the init container
    - finds the pod volume's subdirectory within the above volume
    - runs `restic restore`
-   - on success, writes a file into the pod volume, in an `.ark` subdirectory, whose name is the UID of the Ark restore
+   - on success, writes a file into the pod volume, in a `.velero` subdirectory, whose name is the UID of the Velero restore
 that this pod volume restore is for
    - updates the status of the custom resource to `Completed` or `Failed`
 1. The init container that was added to the pod is running a process that waits until it finds a file
-within each restored volume, under `.ark`, whose name is the UID of the Ark restore being run
+within each restored volume, under `.velero`, whose name is the UID of the Velero restore being run
 1. Once all such files are found, the init container's process terminates successfully and the pod moves
 on to running other init containers/the main containers.

 [1]: https://github.com/restic/restic
 [2]: install-overview.md
-[3]: https://github.com/heptio/ark/releases/
+[3]: https://github.com/heptio/velero/releases/
 [4]: https://kubernetes.io/docs/concepts/storage/volumes/#local
 [5]: http://restic.readthedocs.io/en/latest/100_references.html#terminology
 [6]: https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation

@@ -1,160 +0,0 @@
-# Object Storage Layout Changes in v0.10
-
-## Overview
-
-Ark v0.10 includes breaking changes to where data is stored in your object storage bucket. You'll need to run a [one-time migration procedure](#upgrading-to-v010)
-if you're upgrading from prior versions of Ark.
-
-## Details
-
-Prior to v0.10, Ark stored data in an object storage bucket using the following structure:
-
-```
-<your-bucket>/
-    backup-1/
-        ark-backup.json
-        backup-1.tar.gz
-        backup-1-logs.gz
-        restore-of-backup-1-logs.gz
-        restore-of-backup-1-results.gz
-    backup-2/
-        ark-backup.json
-        backup-2.tar.gz
-        backup-2-logs.gz
-        restore-of-backup-2-logs.gz
-        restore-of-backup-2-results.gz
-    ...
-```
-
-Ark also stored restic data, if applicable, in a separate object storage bucket, structured as:
-
-```
-<your-ark-restic-bucket>/[<your-optional-prefix>/]
-    namespace-1/
-        data/
-        index/
-        keys/
-        snapshots/
-        config
-    namespace-2/
-        data/
-        index/
-        keys/
-        snapshots/
-        config
-    ...
-```
-
-As of v0.10, we've reorganized this layout to provide a cleaner and more extensible directory structure. The new layout looks like:
-
-```
-<your-bucket>[/<your-prefix>]/
-    backups/
-        backup-1/
-            ark-backup.json
-            backup-1.tar.gz
-            backup-1-logs.gz
-        backup-2/
-            ark-backup.json
-            backup-2.tar.gz
-            backup-2-logs.gz
-        ...
-    restores/
-        restore-of-backup-1/
-            restore-of-backup-1-logs.gz
-            restore-of-backup-1-results.gz
-        restore-of-backup-2/
-            restore-of-backup-2-logs.gz
-            restore-of-backup-2-results.gz
-        ...
-    restic/
-        namespace-1/
-            data/
-            index/
-            keys/
-            snapshots/
-            config
-        namespace-2/
-            data/
-            index/
-            keys/
-            snapshots/
-            config
-        ...
-    ...
-```
-
-## Upgrading to v0.10
-
-Before upgrading to v0.10, you'll need to run a one-time upgrade script to rearrange the contents of your existing Ark bucket(s) to be compatible with
-the new layout.
-
-Please note that the following scripts **will not** migrate existing restore logs/results into the new `restores/` subdirectory. This means that they
-will not be accessible using `ark restore describe` or `ark restore logs`. They *will* remain in the relevant backup's subdirectory so they are manually
-accessible, and will eventually be garbage-collected along with the backup. We've taken this approach in order to keep the migration scripts simple
-and less error-prone.
-
-### rclone-Based Script
-
-This script uses [rclone][1], which you can download and install following the instructions [here][2].
-Please read through the script carefully before starting and execute it step-by-step.
-
-```bash
-ARK_BUCKET=<your-ark-bucket>
-ARK_TEMP_MIGRATION_BUCKET=<a-temp-bucket-for-migration>
-
-# 1. This is an interactive step that configures rclone to be
-#    able to access your storage provider. Follow the instructions,
-#    and keep track of the "remote name" for the next step:
-rclone config
-
-# 2. Store the name of the rclone remote that you just set up
-#    in Step #1:
-RCLONE_REMOTE_NAME=<your-remote-name>
-
-# 3. Create a temporary bucket to be used as a backup of your
-#    current Ark bucket's contents:
-rclone mkdir ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}
-
-# 4. Do a full copy of the contents of your Ark bucket into the
-#    temporary bucket:
-rclone copy ${RCLONE_REMOTE_NAME}:${ARK_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}
-
-# 5. Verify that the temporary bucket contains an exact copy of
-#    your Ark bucket's contents. You should see a short block
-#    of output stating "0 differences found":
-rclone check ${RCLONE_REMOTE_NAME}:${ARK_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}
-
-# 6. Delete your Ark bucket's contents (this command does not
-#    delete the bucket itself, only the contents):
-rclone delete ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}
-
-# 7. Copy the contents of the temporary bucket into your Ark bucket,
-#    under the 'backups/' directory/prefix:
-rclone copy ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/backups
-
-# 8. Verify that the 'backups/' directory in your Ark bucket now
-#    contains an exact copy of the temporary bucket's contents:
-rclone check ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/backups ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}
-
-# 9. OPTIONAL: If you have restic data to migrate:
-
-#    a. Copy the contents of your Ark restic location into your
-#       Ark bucket, under the 'restic/' directory/prefix:
-ARK_RESTIC_LOCATION=<your-ark-restic-bucket[/optional-prefix]>
-rclone copy ${RCLONE_REMOTE_NAME}:${ARK_RESTIC_LOCATION} ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/restic
-
-#    b. Check that the 'restic/' directory in your Ark bucket now
-#       contains an exact copy of your restic location:
-rclone check ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/restic ${RCLONE_REMOTE_NAME}:${ARK_RESTIC_LOCATION}
-
-#    c. Delete your ResticRepository custom resources to allow Ark
-#       to find them in the new location:
-kubectl -n heptio-ark delete resticrepositories --all
-
-# 10. Once you've confirmed that Ark v0.10 works with your revised Ark
-#     bucket, you can delete the temporary migration bucket.
-```
-
-[1]: https://rclone.org/
-[2]: https://rclone.org/downloads/

@@ -1,20 +1,20 @@
 # Compatible Storage Providers

-Ark supports a variety of storage providers for different backup and snapshot operations. As of version 0.6.0, a plugin system allows anyone to add compatibility for additional backup and volume storage platforms without modifying the Ark codebase.
+Velero supports a variety of storage providers for different backup and snapshot operations. As of version 0.6.0, a plugin system allows anyone to add compatibility for additional backup and volume storage platforms without modifying the Velero codebase.

 ## Backup Storage Providers

 | Provider                  | Owner    | Contact                         |
 |---------------------------|----------|---------------------------------|
-| [AWS S3][2]               | Ark Team | [Slack][10], [GitHub Issue][11] |
-| [Azure Blob Storage][3]   | Ark Team | [Slack][10], [GitHub Issue][11] |
-| [Google Cloud Storage][4] | Ark Team | [Slack][10], [GitHub Issue][11] |
+| [AWS S3][2]               | Velero Team | [Slack][10], [GitHub Issue][11] |
+| [Azure Blob Storage][3]   | Velero Team | [Slack][10], [GitHub Issue][11] |
+| [Google Cloud Storage][4] | Velero Team | [Slack][10], [GitHub Issue][11] |

 ## S3-Compatible Backup Storage Providers

-Ark uses [Amazon's Go SDK][12] to connect to the S3 API. Some third-party storage providers also support the S3 API, and users have reported the following providers work with Ark:
+Velero uses [Amazon's Go SDK][12] to connect to the S3 API. Some third-party storage providers also support the S3 API, and users have reported the following providers work with Velero:

-_Note that these providers are not regularly tested by the Ark team._
+_Note that these providers are not regularly tested by the Velero team._

 * [IBM Cloud][5]
 * [Minio][9]
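For instance, pointing the `default` backup storage location at an S3-compatible endpoint such as Minio might look roughly like this. A sketch only: the `s3Url` and `s3ForcePathStyle` keys follow the in-tree AWS provider's config, and the endpoint URL and bucket name are placeholders:

```yaml
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: aws
  objectStorage:
    bucket: velero-backups                # placeholder bucket name
  config:
    region: minio                         # placeholder region
    s3ForcePathStyle: "true"              # path-style addressing, needed by Minio
    s3Url: http://minio.velero.svc:9000   # placeholder endpoint
```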

@@ -28,10 +28,10 @@ _Some storage providers, like Quobyte, may need a different [signature algorithm
 | Provider                         | Owner           | Contact                         |
 |----------------------------------|-----------------|---------------------------------|
-| [AWS EBS][2]                     | Ark Team        | [Slack][10], [GitHub Issue][11] |
-| [Azure Managed Disks][3]         | Ark Team        | [Slack][10], [GitHub Issue][11] |
-| [Google Compute Engine Disks][4] | Ark Team        | [Slack][10], [GitHub Issue][11] |
-| [Restic][1]                      | Ark Team        | [Slack][10], [GitHub Issue][11] |
+| [AWS EBS][2]                     | Velero Team     | [Slack][10], [GitHub Issue][11] |
+| [Azure Managed Disks][3]         | Velero Team     | [Slack][10], [GitHub Issue][11] |
+| [Google Compute Engine Disks][4] | Velero Team     | [Slack][10], [GitHub Issue][11] |
+| [Restic][1]                      | Velero Team     | [Slack][10], [GitHub Issue][11] |
 | [Portworx][6]                    | Portworx        | [Slack][13], [GitHub Issue][14] |
 | [DigitalOcean][7]                | StackPointCloud |                                 |

@@ -48,10 +48,10 @@ After you publish your plugin, open a PR that adds your plugin to the appropriat
 [5]: ibm-config.md
 [6]: https://docs.portworx.com/scheduler/kubernetes/ark.html
 [7]: https://github.com/StackPointCloud/ark-plugin-digitalocean
-[8]: https://github.com/heptio/ark-plugin-example/
+[8]: https://github.com/heptio/velero-plugin-example/
 [9]: get-started.md
-[10]: https://kubernetes.slack.com/messages/ark-dr
-[11]: https://github.com/heptio/ark/issues
+[10]: https://kubernetes.slack.com/messages/velero
+[11]: https://github.com/heptio/velero/issues
 [12]: https://github.com/aws/aws-sdk-go/aws
 [13]: https://portworx.slack.com/messages/px-k8s
 [14]: https://github.com/portworx/ark-plugin/issues

@ -1,6 +1,6 @@
|
|||
# Troubleshooting
|
||||
|
||||
These tips can help you troubleshoot known issues. If they don't help, you can [file an issue][4], or talk to us on the [#ark-dr channel][25] on the Kubernetes Slack server.
|
||||
These tips can help you troubleshoot known issues. If they don't help, you can [file an issue][4], or talk to us on the [#velero channel][25] on the Kubernetes Slack server.
|
||||
|
||||
See also:
|
||||
|
||||
|
@ -9,29 +9,29 @@ See also:
|
|||
|
||||
## General troubleshooting information
|
||||
|
||||
In `ark` version >= `0.1.0`, you can use the `ark bug` command to open a [Github issue][4] by launching a browser window with some prepopulated values. Values included are OS, CPU architecture, `kubectl` client and server versions (if available) and the `ark` client version. This information isn't submitted to Github until you click the `Submit new issue` button in the Github UI, so feel free to add, remove or update whatever information you like.
|
||||
In `velero` version >= `0.10.0`, you can use the `velero bug` command to open a [Github issue][4] by launching a browser window with some prepopulated values. Values included are OS, CPU architecture, `kubectl` client and server versions (if available) and the `velero` client version. This information isn't submitted to Github until you click the `Submit new issue` button in the Github UI, so feel free to add, remove or update whatever information you like.
|
||||
|
||||
Some general commands for troubleshooting that may be helpful:
|
||||
|
||||
* `ark backup describe <backupName>` - describe the details of a backup
|
||||
* `ark backup logs <backupName>` - fetch the logs for this specific backup. Useful for viewing failures and warnings, including resources that could not be backed up.
|
||||
* `ark restore describe <restoreName>` - describe the details of a restore
|
||||
* `ark restore logs <restoreName>` - fetch the logs for this specific restore. Useful for viewing failures and warnings, including resources that could not be restored.
|
||||
* `kubectl logs deployment/ark -n heptio-ark` - fetch the logs of the Ark server pod. This provides the output of the Ark server processes.
|
||||
* `velero backup describe <backupName>` - describe the details of a backup
|
||||
* `velero backup logs <backupName>` - fetch the logs for this specific backup. Useful for viewing failures and warnings, including resources that could not be backed up.
|
||||
* `velero restore describe <restoreName>` - describe the details of a restore
|
||||
* `velero restore logs <restoreName>` - fetch the logs for this specific restore. Useful for viewing failures and warnings, including resources that could not be restored.
|
||||
* `kubectl logs deployment/velero -n velero` - fetch the logs of the Velero server pod. This provides the output of the Velero server processes.
|
||||
|
||||
### Getting ark debug logs
|
||||
### Getting velero debug logs
|
||||
|
||||
You can increase the verbosity of the Ark server by editing your Ark deployment to look like this:
|
||||
You can increase the verbosity of the Velero server by editing your Velero deployment to look like this:
|
||||
|
||||
|
||||
```
|
||||
kubectl edit deployment/ark -n heptio-ark
|
||||
kubectl edit deployment/velero -n velero
|
||||
...
|
||||
containers:
|
||||
- name: ark
|
||||
image: gcr.io/heptio-images/ark:latest
|
||||
- name: velero
|
||||
image: gcr.io/heptio-images/velero:latest
|
||||
command:
|
||||
- /ark
|
||||
- /velero
|
||||
args:
|
||||
- server
|
||||
- --log-level # Add this line
|
||||
|
@ -41,18 +41,18 @@ kubectl edit deployment/ark -n heptio-ark
|
|||
|
||||
## Known issue with restoring LoadBalancer Service
|
||||
|
||||
Because of how Kubernetes handles Service objects of `type=LoadBalancer`, when you restore these objects you might encounter an issue with changed values for Service UIDs. Kubernetes automatically generates the name of the cloud resource based on the Service UID, which is different when restored, resulting in a different name for the cloud load balancer. If the DNS CNAME for your application points to the DNS name of your cloud load balancer, you'll need to update the CNAME pointer when you perform an Ark restore.
|
||||
Because of how Kubernetes handles Service objects of `type=LoadBalancer`, when you restore these objects you might encounter an issue with changed values for Service UIDs. Kubernetes automatically generates the name of the cloud resource based on the Service UID, which is different when restored, resulting in a different name for the cloud load balancer. If the DNS CNAME for your application points to the DNS name of your cloud load balancer, you'll need to update the CNAME pointer when you perform a Velero restore.
|
||||
|
||||
Alternatively, you might be able to use the Service's `spec.loadBalancerIP` field to keep connections valid, if your cloud provider supports this value. See [the Kubernetes documentation about Services of Type LoadBalancer](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer).
|
||||
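For example, a hypothetical patch that pins the address before you take a backup (the namespace, Service name, and IP below are placeholders, and not every provider honors this field):

```bash
# Pin the load balancer address on the Service so a restored
# Service can request the same IP (provider support varies)
kubectl -n nginx-example patch service my-app \
  --type merge \
  -p '{"spec": {"loadBalancerIP": "203.0.113.10"}}'
```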
|
||||
## Miscellaneous issues
|
||||
|
||||
### Ark reports `custom resource not found` errors when starting up.
|
||||
### Velero reports `custom resource not found` errors when starting up.
|
||||
|
||||
Ark's server will not start if the required Custom Resource Definitions are not found in Kubernetes. Apply
|
||||
the `config/common/00-prereqs.yaml` file to create these definitions, then restart Ark.
|
||||
Velero's server will not start if the required Custom Resource Definitions are not found in Kubernetes. Apply
|
||||
the `config/common/00-prereqs.yaml` file to create these definitions, then restart Velero.
|
||||
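For example (a sketch; the label selector matches the `component: velero` label set by the deployment manifests in this repository):

```bash
# Create the Velero namespace, service account, RBAC rules, and CRDs
kubectl apply -f config/common/00-prereqs.yaml

# Restart the server by deleting its pod; the deployment recreates it
kubectl -n velero delete pod -l component=velero
```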
|
||||
### `ark backup logs` returns a `SignatureDoesNotMatch` error
|
||||
### `velero backup logs` returns a `SignatureDoesNotMatch` error
|
||||
|
||||
Downloading artifacts from object storage utilizes temporary, signed URLs. In the case of S3-compatible
|
||||
providers, such as Ceph, there may be differences between their implementation and the official S3
|
||||
|
@ -66,6 +66,6 @@ Here are some things to verify if you receive `SignatureDoesNotMatch` errors:
|
|||
|
||||
[1]: debugging-restores.md
|
||||
[2]: debugging-install.md
|
||||
[4]: https://github.com/heptio/ark/issues
|
||||
[4]: https://github.com/heptio/velero/issues
|
||||
[5]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
|
||||
[25]: https://kubernetes.slack.com/messages/ark-dr
|
||||
[25]: https://kubernetes.slack.com/messages/velero
|
||||
|
|
|
@ -1,89 +0,0 @@
|
|||
# Upgrading to Ark v0.10
|
||||
|
||||
## Overview
|
||||
|
||||
Ark v0.10 includes a number of breaking changes. Below, we outline what those changes are, and what steps you should take to ensure
|
||||
a successful upgrade from prior versions of Ark.
|
||||
|
||||
## Breaking Changes
|
||||
|
||||
### Switch from Config to BackupStorageLocation and VolumeSnapshotLocation CRDs, and new server flags
|
||||
|
||||
Prior to v0.10, Ark used a `Config` CRD to capture information about your backup storage and persistent volume providers, as well as
|
||||
some miscellaneous Ark settings. In v0.10, we've eliminated this CRD and replaced it with:
|
||||
|
||||
- A [BackupStorageLocation][1] CRD to capture information about where to store your backups
|
||||
- A [VolumeSnapshotLocation][2] CRD to capture information about where to store your persistent volume snapshots
|
||||
- Command-line flags for the `ark server` command (run by your Ark deployment) to capture miscellaneous Ark settings
|
||||
|
||||
When upgrading to v0.10, you'll need to transfer the configuration information that you currently have in the `Config` CRD
|
||||
into the above. We'll cover exactly how to do this below.
|
||||
|
||||
For a general overview of this change, see the [Locations documentation][4].
|
||||
|
||||
### Reorganization of data in object storage
|
||||
|
||||
We've made [changes to the layout of data stored in object storage][3] for simplicity and extensibility. You'll need to
|
||||
rearrange any pre-v0.10 data as part of the upgrade. We've provided a script to help with this.
|
||||
|
||||
## Step-by-Step Upgrade Instructions
|
||||
|
||||
1. Ensure you've [downloaded & extracted the latest release][5].
|
||||
|
||||
1. Scale down your existing Ark deployment:
|
||||
```bash
|
||||
kubectl scale -n heptio-ark deploy/ark --replicas 0
|
||||
```
|
||||
|
||||
1. In the Ark directory (i.e. where you extracted the release tarball), re-apply the `00-prereqs.yaml` file to create new CRDs:
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
1. Create one or more [BackupStorageLocation][1] resources based on the examples provided in the `config/` directory for your platform, using information from the existing `Config` resource as necessary.
|
||||
|
||||
1. If you're using Ark to take PV snapshots, create one or more [VolumeSnapshotLocation][2] resources based on the examples provided in the `config/` directory for your platform, using information from the existing `Config` resource as necessary.
|
||||
|
||||
1. Perform the one-time object storage migration detailed [here][3].
|
||||
|
||||
1. In your Ark deployment YAML (see the `config/` directory for samples), specify flags to the `ark server` command under the container's `args`:
|
||||
|
||||
a. The names of the `BackupStorageLocation` and `VolumeSnapshotLocation(s)` that should be used by default for backups. If defaults are set here,
|
||||
users won't need to explicitly specify location names when creating backups (though they still can, if they want to store backups/snapshots in
|
||||
alternate locations). If no value is specified for `--default-backup-storage-location`, the Ark server looks for a `BackupStorageLocation`
|
||||
named `default` to use.
|
||||
|
||||
Flag | Default Value | Description | Example
|
||||
---- | ------------- | ----------- | -------
|
||||
`--default-backup-storage-location` | "default" | name of the backup storage location that should be used by default for backups | aws-us-east-1-bucket
|
||||
`--default-volume-snapshot-locations` | [none] | name of the volume snapshot location(s) that should be used by default for PV snapshots, for each PV provider | aws:us-east-1,portworx:local
|
||||
|
||||
**NOTE:** the values of these flags should correspond to the names of `BackupStorageLocation` and `VolumeSnapshotLocation` custom resources
|
||||
in the cluster; a sketch of setting these flags follows the tables below.
|
||||
|
||||
b. Any non-default Ark server settings:
|
||||
|
||||
Flag | Default Value | Description
|
||||
---- | ------------- | -----------
|
||||
`--backup-sync-period` | 1m | how often to ensure all Ark backups in object storage exist as Backup API objects in the cluster
|
||||
`--restic-timeout` | 1h | how long backups/restores of pod volumes should be allowed to run before timing out (previously `podVolumeOperationTimeout` in the `Config` resource in pre-v0.10 versions)
|
||||
`--restore-only` | false | run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled
|
||||
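As an alternative to editing the deployment YAML by hand, the same flags can be appended to a running deployment's container args. This is a sketch only; the location names are placeholders that must match your own custom resources:

```bash
# Append default-location flags to the Ark server's container args
# ("aws-us-east-1-bucket" and "aws:us-east-1" are placeholder names)
kubectl -n heptio-ark patch deployment/ark --type json -p '[
  {"op": "add", "path": "/spec/template/spec/containers/0/args/-",
   "value": "--default-backup-storage-location=aws-us-east-1-bucket"},
  {"op": "add", "path": "/spec/template/spec/containers/0/args/-",
   "value": "--default-volume-snapshot-locations=aws:us-east-1"}
]'
```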
|
||||
1. If you are using any plugins, update the Ark deployment YAML to reference the latest image tag for your plugins. This can be found under the `initContainers` section of your deployment YAML.
|
||||
|
||||
1. Apply your updated Ark deployment YAML to your cluster and ensure the pod(s) starts up successfully.
|
||||
|
||||
1. If you're using Ark's restic integration, ensure the daemon set pods have been re-created with the latest Ark image (if your daemon set YAML is using the `:latest` tag, you can delete the pods so they're recreated with an updated image).
|
||||
|
||||
1. Once you've confirmed all of your settings have been migrated over correctly, delete the Config CRD:
|
||||
```bash
|
||||
kubectl delete -n heptio-ark config --all
|
||||
kubectl delete crd configs.ark.heptio.com
|
||||
```
|
||||
|
||||
|
||||
[1]: api-types/backupstoragelocation.md
|
||||
[2]: api-types/volumesnapshotlocation.md
|
||||
[3]: storage-layout-reorg-v0.10.md
|
||||
[4]: locations.md
|
||||
[5]: get-started.md#download
|
|
@ -1,6 +1,6 @@
|
|||
# Upgrading Ark versions
|
||||
# Upgrading Velero versions
|
||||
|
||||
Ark supports multiple concurrent versions. Whether you're setting up Ark for the first time or upgrading to a new version, you need to pay careful attention to versioning. This doc page is new as of version 0.10.0, and will be updated with information about subsequent releases.
|
||||
Velero supports multiple concurrent versions. Whether you're setting up Velero for the first time or upgrading to a new version, you need to pay careful attention to versioning. This doc page is new as of version 0.10.0, and will be updated with information about subsequent releases.
|
||||
|
||||
## Minor versions, patch versions
|
||||
|
||||
|
@ -14,13 +14,13 @@ Breaking changes are documented in the release notes and in the documentation.
|
|||
|
||||
- See [Upgrading to version 0.10.0][2]
|
||||
|
||||
## Ark versions and Kubernetes versions
|
||||
## Velero versions and Kubernetes versions
|
||||
|
||||
Not all Ark versions support all versions of Kubernetes. You should be aware of the following known limitations:
|
||||
Not all Velero versions support all versions of Kubernetes. You should be aware of the following known limitations:
|
||||
|
||||
- Ark version 0.9.0 requires Kubernetes version 1.8 or later. In version 0.9.1, Ark was updated to support earlier versions.
|
||||
- Velero version 0.9.0 requires Kubernetes version 1.8 or later. In version 0.9.1, Velero was updated to support earlier versions.
|
||||
- Restic support requires Kubernetes version 1.10 or later, or an earlier version with the mount propagation feature enabled. See [Restic Integration][3].
|
||||
|
||||
[1]: https://github.com/heptio/ark/releases
|
||||
[2]: upgrading-to-v0.10.md
|
||||
[1]: https://github.com/heptio/velero/releases
|
||||
[2]: https://heptio.github.io/velero/v0.10.0/upgrading-to-v0.10
|
||||
[3]: restic.md
|
||||
|
|
|
@ -3,13 +3,13 @@
|
|||
As an Open Source community, it is necessary for our work, communication, and collaboration to be done in the open.
|
||||
GitHub provides a central repository for code, pull requests, issues, and documentation. When applicable, we will use Google Docs for design reviews, proposals, and other working documents.
|
||||
|
||||
While GitHub issues, milestones, and labels generally work pretty well, the Heptio team has found that product planning requires some additional tooling that GitHub projects do not offer.
|
||||
While GitHub issues, milestones, and labels generally work pretty well, the Velero team has found that product planning requires some additional tooling that GitHub projects do not offer.
|
||||
|
||||
In our effort to minimize tooling while enabling product management insights, we have decided to use [ZenHub Open-Source](https://www.zenhub.com/blog/open-source/) to overlay product and project tracking on top of GitHub.
|
||||
ZenHub is a GitHub application that provides Kanban visualization, Epic tracking, fine-grained prioritization, and more. Its primary backing storage system is existing GitHub issues, along with additional metadata stored in ZenHub's database.
|
||||
|
||||
If you are an Ark user or Ark Developer, you do not _need_ to use ZenHub for your regular workflow (e.g to see open bug reports or feature requests, work on pull requests). However, if you'd like to be able to visualize the high-level project goals and roadmap, you will need to use the free version of ZenHub.
|
||||
If you are a Velero user or Velero developer, you do not _need_ to use ZenHub for your regular workflow (e.g., to see open bug reports or feature requests, or to work on pull requests). However, if you'd like to be able to visualize the high-level project goals and roadmap, you will need to use the free version of ZenHub.
|
||||
|
||||
## Using ZenHub
|
||||
|
||||
ZenHub can be integrated within the GitHub interface using their [Chrome or FireFox extensions](https://www.zenhub.com/extension). In addition, you can use their dedicated [web application](https://app.zenhub.com/workspace/o/heptio/ark/boards?filterLogic=all&repos=99143276).
|
||||
ZenHub can be integrated into the GitHub interface using their [Chrome or Firefox extensions](https://www.zenhub.com/extension). In addition, you can use their dedicated [web application](https://app.zenhub.com/workspace/o/heptio/velero/boards?filterLogic=all&repos=99143276).
|
||||
|
|
|
@ -1,13 +1,13 @@
|
|||
# Examples
|
||||
|
||||
This directory contains sample YAML config files for running Ark on each core provider. Starting with v0.10, these files are packaged into [the Ark release tarballs][2], and we highly recommend that you use the packaged versions of these files to ensure compatibility with the released code.
|
||||
This directory contains sample YAML config files for running Velero on each core provider. Starting with v0.10, these files are packaged into [the Velero release tarballs][2], and we highly recommend that you use the packaged versions of these files to ensure compatibility with the released code.
|
||||
|
||||
* `common/`: Contains manifests to set up Ark. Can be used across cloud provider platforms. (Note that Azure requires its own deployment file due to its unique way of loading credentials).
|
||||
* `common/`: Contains manifests to set up Velero. Can be used across cloud provider platforms. (Note that Azure requires its own deployment file due to its unique way of loading credentials).
|
||||
|
||||
* `minio/`: Used in the [Quickstart][1] to set up [Minio][0], a local S3-compatible object storage service. It provides a convenient way to test Ark without tying you to a specific cloud provider.
|
||||
* `minio/`: Used in the [Quickstart][1] to set up [Minio][0], a local S3-compatible object storage service. It provides a convenient way to test Velero without tying you to a specific cloud provider.
|
||||
|
||||
* `aws/`, `azure/`, `gcp/`, `ibm/`: Contains manifests specific to the given cloud provider's setup.
|
||||
|
||||
[0]: https://github.com/minio/minio
|
||||
[1]: /README.md#quickstart
|
||||
[2]: https://github.com/heptio/ark/releases
|
||||
[2]: https://github.com/heptio/velero/releases
|
||||
|
|
|
@ -13,11 +13,11 @@
|
|||
# limitations under the License.
|
||||
|
||||
---
|
||||
apiVersion: ark.heptio.com/v1
|
||||
apiVersion: velero.io/v1
|
||||
kind: BackupStorageLocation
|
||||
metadata:
|
||||
name: default
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
provider: aws
|
||||
objectStorage:
|
|
@ -13,12 +13,12 @@
|
|||
# limitations under the License.
|
||||
|
||||
---
|
||||
apiVersion: ark.heptio.com/v1
|
||||
apiVersion: velero.io/v1
|
||||
kind: VolumeSnapshotLocation
|
||||
metadata:
|
||||
name: aws-default
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
provider: aws
|
||||
config:
|
||||
region: <YOUR_REGION>
|
||||
region: <YOUR_REGION>
|
|
@ -16,30 +16,30 @@
|
|||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
name: ark
|
||||
namespace: velero
|
||||
name: velero
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
annotations:
|
||||
iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<HEPTIO_ARK_ROLE_NAME>
|
||||
iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<VELERO_ROLE_NAME>
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8085"
|
||||
prometheus.io/path: "/metrics"
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
serviceAccountName: ark
|
||||
serviceAccountName: velero
|
||||
containers:
|
||||
- name: ark
|
||||
image: gcr.io/heptio-images/ark:latest
|
||||
- name: velero
|
||||
image: gcr.io/heptio-images/velero:latest
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: 8085
|
||||
command:
|
||||
- /ark
|
||||
- /velero
|
||||
args:
|
||||
- server
|
||||
volumeMounts:
|
||||
|
|
|
@ -16,26 +16,26 @@
|
|||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
name: ark
|
||||
namespace: velero
|
||||
name: velero
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8085"
|
||||
prometheus.io/path: "/metrics"
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
serviceAccountName: ark
|
||||
serviceAccountName: velero
|
||||
containers:
|
||||
- name: ark
|
||||
image: gcr.io/heptio-images/ark:latest
|
||||
- name: velero
|
||||
image: gcr.io/heptio-images/velero:latest
|
||||
command:
|
||||
- /ark
|
||||
- /velero
|
||||
args:
|
||||
- server
|
||||
## uncomment the following line and specify values if needed for multiple provider snapshot locations
|
||||
|
@ -50,7 +50,7 @@ spec:
|
|||
env:
|
||||
- name: AWS_SHARED_CREDENTIALS_FILE
|
||||
value: /credentials/cloud
|
||||
- name: ARK_SCRATCH_DIR
|
||||
- name: VELERO_SCRATCH_DIR
|
||||
value: /scratch
|
||||
#- name: AWS_CLUSTER_NAME
|
||||
# value: <YOUR_CLUSTER_NAME>
|
||||
|
|
|
@ -16,7 +16,7 @@ apiVersion: apps/v1
|
|||
kind: DaemonSet
|
||||
metadata:
|
||||
name: restic
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
|
@ -26,7 +26,7 @@ spec:
|
|||
labels:
|
||||
name: restic
|
||||
spec:
|
||||
serviceAccountName: ark
|
||||
serviceAccountName: velero
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
volumes:
|
||||
|
@ -39,10 +39,10 @@ spec:
|
|||
- name: scratch
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: ark
|
||||
image: gcr.io/heptio-images/ark:latest
|
||||
- name: velero
|
||||
image: gcr.io/heptio-images/velero:latest
|
||||
command:
|
||||
- /ark
|
||||
- /velero
|
||||
args:
|
||||
- restic
|
||||
- server
|
||||
|
@ -59,11 +59,11 @@ spec:
|
|||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: HEPTIO_ARK_NAMESPACE
|
||||
- name: VELERO_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: AWS_SHARED_CREDENTIALS_FILE
|
||||
value: /credentials/cloud
|
||||
- name: ARK_SCRATCH_DIR
|
||||
value: /scratch
|
||||
- name: VELERO_SCRATCH_DIR
|
||||
value: /scratch
|
||||
|
|
|
@ -16,29 +16,29 @@
|
|||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
name: ark
|
||||
namespace: velero
|
||||
name: velero
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8085"
|
||||
prometheus.io/path: "/metrics"
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
serviceAccountName: ark
|
||||
serviceAccountName: velero
|
||||
containers:
|
||||
- name: ark
|
||||
image: gcr.io/heptio-images/ark:latest
|
||||
- name: velero
|
||||
image: gcr.io/heptio-images/velero:latest
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: 8085
|
||||
command:
|
||||
- /ark
|
||||
- /velero
|
||||
args:
|
||||
- server
|
||||
## uncomment the following line and specify values if needed for multiple provider snapshot locations
|
||||
|
@ -47,7 +47,7 @@ spec:
|
|||
- secretRef:
|
||||
name: cloud-credentials
|
||||
env:
|
||||
- name: ARK_SCRATCH_DIR
|
||||
- name: VELERO_SCRATCH_DIR
|
||||
value: /scratch
|
||||
volumeMounts:
|
||||
- name: plugins
|
|
@ -13,11 +13,11 @@
|
|||
# limitations under the License.
|
||||
|
||||
---
|
||||
apiVersion: ark.heptio.com/v1
|
||||
apiVersion: velero.io/v1
|
||||
kind: BackupStorageLocation
|
||||
metadata:
|
||||
name: default
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
provider: azure
|
||||
objectStorage:
|
|
@ -13,11 +13,11 @@
|
|||
# limitations under the License.
|
||||
|
||||
---
|
||||
apiVersion: ark.heptio.com/v1
|
||||
apiVersion: velero.io/v1
|
||||
kind: VolumeSnapshotLocation
|
||||
metadata:
|
||||
name: azure-default
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
provider: azure
|
||||
config:
|
|
@ -16,7 +16,7 @@ apiVersion: apps/v1
|
|||
kind: DaemonSet
|
||||
metadata:
|
||||
name: restic
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
|
@ -26,7 +26,7 @@ spec:
|
|||
labels:
|
||||
name: restic
|
||||
spec:
|
||||
serviceAccountName: ark
|
||||
serviceAccountName: velero
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
volumes:
|
||||
|
@ -36,10 +36,10 @@ spec:
|
|||
- name: scratch
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: ark
|
||||
image: gcr.io/heptio-images/ark:latest
|
||||
- name: velero
|
||||
image: gcr.io/heptio-images/velero:latest
|
||||
command:
|
||||
- /ark
|
||||
- /velero
|
||||
args:
|
||||
- restic
|
||||
- server
|
||||
|
@ -57,9 +57,9 @@ spec:
|
|||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: HEPTIO_ARK_NAMESPACE
|
||||
- name: VELERO_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: ARK_SCRATCH_DIR
|
||||
value: /scratch
|
||||
- name: VELERO_SCRATCH_DIR
|
||||
value: /scratch
|
||||
|
|
|
@ -16,11 +16,11 @@
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: backups.ark.heptio.com
|
||||
name: backups.velero.io
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
spec:
|
||||
group: ark.heptio.com
|
||||
group: velero.io
|
||||
version: v1
|
||||
scope: Namespaced
|
||||
names:
|
||||
|
@ -31,11 +31,11 @@ spec:
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: schedules.ark.heptio.com
|
||||
name: schedules.velero.io
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
spec:
|
||||
group: ark.heptio.com
|
||||
group: velero.io
|
||||
version: v1
|
||||
scope: Namespaced
|
||||
names:
|
||||
|
@ -46,11 +46,11 @@ spec:
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: restores.ark.heptio.com
|
||||
name: restores.velero.io
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
spec:
|
||||
group: ark.heptio.com
|
||||
group: velero.io
|
||||
version: v1
|
||||
scope: Namespaced
|
||||
names:
|
||||
|
@ -61,11 +61,11 @@ spec:
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: downloadrequests.ark.heptio.com
|
||||
name: downloadrequests.velero.io
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
spec:
|
||||
group: ark.heptio.com
|
||||
group: velero.io
|
||||
version: v1
|
||||
scope: Namespaced
|
||||
names:
|
||||
|
@ -76,11 +76,11 @@ spec:
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: deletebackuprequests.ark.heptio.com
|
||||
name: deletebackuprequests.velero.io
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
spec:
|
||||
group: ark.heptio.com
|
||||
group: velero.io
|
||||
version: v1
|
||||
scope: Namespaced
|
||||
names:
|
||||
|
@ -91,11 +91,11 @@ spec:
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: podvolumebackups.ark.heptio.com
|
||||
name: podvolumebackups.velero.io
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
spec:
|
||||
group: ark.heptio.com
|
||||
group: velero.io
|
||||
version: v1
|
||||
scope: Namespaced
|
||||
names:
|
||||
|
@ -106,11 +106,11 @@ spec:
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: podvolumerestores.ark.heptio.com
|
||||
name: podvolumerestores.velero.io
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
spec:
|
||||
group: ark.heptio.com
|
||||
group: velero.io
|
||||
version: v1
|
||||
scope: Namespaced
|
||||
names:
|
||||
|
@ -121,11 +121,11 @@ spec:
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: resticrepositories.ark.heptio.com
|
||||
name: resticrepositories.velero.io
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
spec:
|
||||
group: ark.heptio.com
|
||||
group: velero.io
|
||||
version: v1
|
||||
scope: Namespaced
|
||||
names:
|
||||
|
@ -136,11 +136,11 @@ spec:
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: backupstoragelocations.ark.heptio.com
|
||||
name: backupstoragelocations.velero.io
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
spec:
|
||||
group: ark.heptio.com
|
||||
group: velero.io
|
||||
version: v1
|
||||
scope: Namespaced
|
||||
names:
|
||||
|
@ -151,11 +151,11 @@ spec:
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: volumesnapshotlocations.ark.heptio.com
|
||||
name: volumesnapshotlocations.velero.io
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
spec:
|
||||
group: ark.heptio.com
|
||||
group: velero.io
|
||||
version: v1
|
||||
scope: Namespaced
|
||||
names:
|
||||
|
@ -166,11 +166,11 @@ spec:
|
|||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: serverstatusrequests.ark.heptio.com
|
||||
name: serverstatusrequests.velero.io
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
spec:
|
||||
group: ark.heptio.com
|
||||
group: velero.io
|
||||
version: v1
|
||||
scope: Namespaced
|
||||
names:
|
||||
|
@ -180,28 +180,28 @@ spec:
|
|||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: heptio-ark
|
||||
name: velero
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: ark
|
||||
namespace: heptio-ark
|
||||
name: velero
|
||||
namespace: velero
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: ark
|
||||
name: velero
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
namespace: heptio-ark
|
||||
name: ark
|
||||
namespace: velero
|
||||
name: velero
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
|
|
|
@ -2,9 +2,9 @@
|
|||
|
||||
## 00-prereqs.yaml
|
||||
|
||||
This file contains the prerequisites necessary to run the Ark server:
|
||||
This file contains the prerequisites necessary to run the Velero server:
|
||||
|
||||
- `heptio-ark` namespace
|
||||
- `ark` service account
|
||||
- RBAC rules to grant permissions to the `ark` service account
|
||||
- CRDs for the Ark-specific resources (Backup, Schedule, Restore, etc.)
|
||||
- `velero` namespace
|
||||
- `velero` service account
|
||||
- RBAC rules to grant permissions to the `velero` service account
|
||||
- CRDs for the Velero-specific resources (Backup, Schedule, Restore, etc.)
|
||||
|
|
|
@ -13,11 +13,11 @@
|
|||
# limitations under the License.
|
||||
|
||||
---
|
||||
apiVersion: ark.heptio.com/v1
|
||||
apiVersion: velero.io/v1
|
||||
kind: BackupStorageLocation
|
||||
metadata:
|
||||
name: default
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
provider: gcp
|
||||
objectStorage:
|
|
@ -13,10 +13,10 @@
|
|||
# limitations under the License.
|
||||
|
||||
---
|
||||
apiVersion: ark.heptio.com/v1
|
||||
apiVersion: velero.io/v1
|
||||
kind: VolumeSnapshotLocation
|
||||
metadata:
|
||||
name: gcp-default
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
provider: gcp
|
||||
provider: gcp
|
|
@ -16,29 +16,29 @@
|
|||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
name: ark
|
||||
namespace: velero
|
||||
name: velero
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8085"
|
||||
prometheus.io/path: "/metrics"
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
serviceAccountName: ark
|
||||
serviceAccountName: velero
|
||||
containers:
|
||||
- name: ark
|
||||
image: gcr.io/heptio-images/ark:latest
|
||||
- name: velero
|
||||
image: gcr.io/heptio-images/velero:latest
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: 8085
|
||||
command:
|
||||
- /ark
|
||||
- /velero
|
||||
args:
|
||||
- server
|
||||
## uncomment the following line and specify values if needed for multiple provider snapshot locations
|
||||
|
@ -53,7 +53,7 @@ spec:
|
|||
env:
|
||||
- name: GOOGLE_APPLICATION_CREDENTIALS
|
||||
value: /credentials/cloud
|
||||
- name: ARK_SCRATCH_DIR
|
||||
- name: VELERO_SCRATCH_DIR
|
||||
value: /scratch
|
||||
volumes:
|
||||
- name: cloud-credentials
|
||||
|
|
|
@ -16,7 +16,7 @@ apiVersion: apps/v1
|
|||
kind: DaemonSet
|
||||
metadata:
|
||||
name: restic
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
|
@ -26,7 +26,7 @@ spec:
|
|||
labels:
|
||||
name: restic
|
||||
spec:
|
||||
serviceAccountName: ark
|
||||
serviceAccountName: velero
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
volumes:
|
||||
|
@ -39,10 +39,10 @@ spec:
|
|||
- name: scratch
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: ark
|
||||
image: gcr.io/heptio-images/ark:latest
|
||||
- name: velero
|
||||
image: gcr.io/heptio-images/velero:latest
|
||||
command:
|
||||
- /ark
|
||||
- /velero
|
||||
args:
|
||||
- restic
|
||||
- server
|
||||
|
@ -59,11 +59,11 @@ spec:
|
|||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: HEPTIO_ARK_NAMESPACE
|
||||
- name: VELERO_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: GOOGLE_APPLICATION_CREDENTIALS
|
||||
value: /credentials/cloud
|
||||
- name: ARK_SCRATCH_DIR
|
||||
value: /scratch
|
||||
- name: VELERO_SCRATCH_DIR
|
||||
value: /scratch
|
||||
|
|
|
@ -13,11 +13,11 @@
|
|||
# limitations under the License.
|
||||
|
||||
---
|
||||
apiVersion: ark.heptio.com/v1
|
||||
apiVersion: velero.io/v1
|
||||
kind: BackupStorageLocation
|
||||
metadata:
|
||||
name: default
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
provider: aws
|
||||
objectStorage:
|
|
@ -16,29 +16,29 @@
|
|||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
name: ark
|
||||
namespace: velero
|
||||
name: velero
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8085"
|
||||
prometheus.io/path: "/metrics"
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
serviceAccountName: ark
|
||||
serviceAccountName: velero
|
||||
containers:
|
||||
- name: ark
|
||||
image: gcr.io/heptio-images/ark:latest
|
||||
- name: velero
|
||||
image: gcr.io/heptio-images/velero:latest
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: 8085
|
||||
command:
|
||||
- /ark
|
||||
- /velero
|
||||
args:
|
||||
- server
|
||||
volumeMounts:
|
||||
|
@ -51,7 +51,7 @@ spec:
|
|||
env:
|
||||
- name: AWS_SHARED_CREDENTIALS_FILE
|
||||
value: /credentials/cloud
|
||||
- name: ARK_SCRATCH_DIR
|
||||
- name: VELERO_SCRATCH_DIR
|
||||
value: /scratch
|
||||
volumes:
|
||||
- name: cloud-credentials
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
name: minio
|
||||
labels:
|
||||
component: minio
|
||||
|
@ -58,7 +58,7 @@ spec:
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
name: minio
|
||||
labels:
|
||||
component: minio
|
||||
|
@ -78,7 +78,7 @@ spec:
|
|||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
name: cloud-credentials
|
||||
labels:
|
||||
component: minio
|
||||
|
@ -92,7 +92,7 @@ stringData:
|
|||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
name: minio-setup
|
||||
labels:
|
||||
component: minio
|
||||
|
@ -112,7 +112,7 @@ spec:
|
|||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- "mc --config-dir=/config config host add ark http://minio:9000 minio minio123 && mc --config-dir=/config mb -p ark/ark"
|
||||
- "mc --config-dir=/config config host add velero http://minio:9000 minio minio123 && mc --config-dir=/config mb -p velero/velero"
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: "/config"
|
||||
|
|
|
@ -13,21 +13,21 @@
|
|||
# limitations under the License.
|
||||
|
||||
---
|
||||
apiVersion: ark.heptio.com/v1
|
||||
apiVersion: velero.io/v1
|
||||
kind: BackupStorageLocation
|
||||
metadata:
|
||||
name: default
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
provider: aws
|
||||
objectStorage:
|
||||
bucket: ark
|
||||
bucket: velero
|
||||
config:
|
||||
region: minio
|
||||
s3ForcePathStyle: "true"
|
||||
s3Url: http://minio.heptio-ark.svc:9000
|
||||
s3Url: http://minio.velero.svc:9000
|
||||
# Uncomment the following line and provide the value of an externally
|
||||
# available URL for downloading logs, running Ark describe, and more.
|
||||
# available URL for downloading logs, running Velero describe, and more.
|
||||
# publicUrl: https://minio.mycluster.com
|
||||
|
||||
|
|
@ -16,29 +16,29 @@
|
|||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
name: ark
|
||||
namespace: velero
|
||||
name: velero
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: ark
|
||||
component: velero
|
||||
annotations:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8085"
|
||||
prometheus.io/path: "/metrics"
|
||||
spec:
|
||||
restartPolicy: Always
|
||||
serviceAccountName: ark
|
||||
serviceAccountName: velero
|
||||
containers:
|
||||
- name: ark
|
||||
image: gcr.io/heptio-images/ark:latest
|
||||
- name: velero
|
||||
image: gcr.io/heptio-images/velero:latest
|
||||
ports:
|
||||
- name: metrics
|
||||
containerPort: 8085
|
||||
command:
|
||||
- /ark
|
||||
- /velero
|
||||
args:
|
||||
- server
|
||||
volumeMounts:
|
||||
|
@ -51,7 +51,7 @@ spec:
|
|||
env:
|
||||
- name: AWS_SHARED_CREDENTIALS_FILE
|
||||
value: /credentials/cloud
|
||||
- name: ARK_SCRATCH_DIR
|
||||
- name: VELERO_SCRATCH_DIR
|
||||
value: /scratch
|
||||
volumes:
|
||||
- name: cloud-credentials
|
|
@ -16,7 +16,7 @@ apiVersion: apps/v1
|
|||
kind: DaemonSet
|
||||
metadata:
|
||||
name: restic
|
||||
namespace: heptio-ark
|
||||
namespace: velero
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
|
@ -26,7 +26,7 @@ spec:
|
|||
labels:
|
||||
name: restic
|
||||
spec:
|
||||
serviceAccountName: ark
|
||||
serviceAccountName: velero
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
volumes:
|
||||
|
@ -39,10 +39,10 @@ spec:
|
|||
- name: scratch
|
||||
emptyDir: {}
|
||||
containers:
|
||||
- name: ark
|
||||
image: gcr.io/heptio-images/ark:latest
|
||||
- name: velero
|
||||
image: gcr.io/heptio-images/velero:latest
|
||||
command:
|
||||
- /ark
|
||||
- /velero
|
||||
args:
|
||||
- restic
|
||||
- server
|
||||
|
@ -59,11 +59,11 @@ spec:
|
|||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: HEPTIO_ARK_NAMESPACE
|
||||
- name: VELERO_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: AWS_SHARED_CREDENTIALS_FILE
|
||||
value: /credentials/cloud
|
||||
- name: ARK_SCRATCH_DIR
|
||||
value: /scratch
|
||||
- name: VELERO_SCRATCH_DIR
|
||||
value: /scratch
|
||||
|
|
|
@ -4,12 +4,12 @@ This directory contains manifests for two versions of a sample Nginx app under t
|
|||
|
||||
## `base.yaml`
|
||||
|
||||
This is the most basic version of the Nginx app, which can be used to test Ark's backup and restore functionality.
|
||||
This is the most basic version of the Nginx app, which can be used to test Velero's backup and restore functionality.
|
||||
|
||||
*This can be deployed as is.*
|
||||
|
||||
## `with-pv.yaml`
|
||||
|
||||
This sets up an Nginx app that logs to a persistent volume, so that Ark's PV snapshotting functionality can also be tested.
|
||||
This sets up an Nginx app that logs to a persistent volume, so that Velero's PV snapshotting functionality can also be tested.
|
||||
|
||||
*This requires you to first replace the placeholder value `<YOUR_STORAGE_CLASS_NAME>`.*
|
||||
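For example, from this directory (a sketch; `standard` is a placeholder storage class name):

```bash
# Deploy the basic app as-is
kubectl apply -f base.yaml

# For the PV variant, fill in the storage class placeholder first
sed 's/<YOUR_STORAGE_CLASS_NAME>/standard/' with-pv.yaml | kubectl apply -f -
```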
|
|
|
@ -49,10 +49,10 @@ spec:
|
|||
labels:
|
||||
app: nginx
|
||||
annotations:
|
||||
pre.hook.backup.ark.heptio.com/container: fsfreeze
|
||||
pre.hook.backup.ark.heptio.com/command: '["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]'
|
||||
post.hook.backup.ark.heptio.com/container: fsfreeze
|
||||
post.hook.backup.ark.heptio.com/command: '["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]'
|
||||
pre.hook.backup.velero.io/container: fsfreeze
|
||||
pre.hook.backup.velero.io/command: '["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]'
|
||||
post.hook.backup.velero.io/container: fsfreeze
|
||||
post.hook.backup.velero.io/command: '["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]'
|
||||
spec:
|
||||
volumes:
|
||||
- name: nginx-logs
|
||||
|
|
|
@ -30,7 +30,7 @@ rm -rf config/ && cp -r examples/ config/
|
|||
# the "-i'.bak'" flag to sed is necessary, with no space between the flag
|
||||
# and the value, for this to be compatible across BSD/OSX sed and GNU sed.
|
||||
# remove the ".bak" files afterwards (they're copies of the originals).
|
||||
find config/ -type f -name "*.yaml" | xargs sed -i'.bak' "s|gcr.io/heptio-images/ark:latest|gcr.io/heptio-images/ark:$GIT_TAG|g"
|
||||
find config/ -type f -name "*.yaml" | xargs sed -i'.bak' "s|gcr.io/heptio-images/velero:latest|gcr.io/heptio-images/velero:$GIT_TAG|g"
|
||||
find config/ -type f -name "*.bak" | xargs rm
|
||||
|
||||
find config/ -type f -name "*.yaml" | xargs sed -i'.bak' "s|gcr.io/heptio-images/fsfreeze-pause:latest|gcr.io/heptio-images/fsfreeze-pause:$GIT_TAG|g"
|
||||
|
|
|
@ -50,7 +50,7 @@ fi
|
|||
|
||||
echo "${ACTION} goimports"
|
||||
for file in ${files}; do
|
||||
output=$(goimports "${MODE}" -local github.com/heptio/ark "${file}")
|
||||
output=$(goimports "${MODE}" -local github.com/heptio/velero "${file}")
|
||||
if [[ -n "${output}" ]]; then
|
||||
VERIFY_IMPORTS_FAILED=1
|
||||
echo "${output}"
|
||||
|
|
|
@ -32,8 +32,8 @@ cd ${GOPATH}/src/k8s.io/code-generator
|
|||
|
||||
./generate-groups.sh \
|
||||
all \
|
||||
github.com/heptio/ark/pkg/generated \
|
||||
github.com/heptio/ark/pkg/apis \
|
||||
ark:v1 \
|
||||
--go-header-file ${GOPATH}/src/github.com/heptio/ark/hack/boilerplate.go.txt \
|
||||
github.com/heptio/velero/pkg/generated \
|
||||
github.com/heptio/velero/pkg/apis \
|
||||
"ark:v1 velero:v1" \
|
||||
--go-header-file ${GOPATH}/src/github.com/heptio/velero/hack/boilerplate.go.txt \
|
||||
$@
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
ARK_ROOT=$(dirname ${BASH_SOURCE})/..
|
||||
BIN=${ARK_ROOT}/_output/bin
|
||||
VELERO_ROOT=$(dirname ${BASH_SOURCE})/..
|
||||
BIN=${VELERO_ROOT}/_output/bin
|
||||
|
||||
mkdir -p ${BIN}
|
||||
|
||||
|
@ -29,7 +29,7 @@ fi
|
|||
|
||||
OUTPUT_ISSUE_FILE="$1"
|
||||
if [[ -z "${OUTPUT_ISSUE_FILE}" ]]; then
|
||||
OUTPUT_ISSUE_FILE=${ARK_ROOT}/.github/ISSUE_TEMPLATE/bug_report.md
|
||||
OUTPUT_ISSUE_FILE=${VELERO_ROOT}/.github/ISSUE_TEMPLATE/bug_report.md
|
||||
fi
|
||||
|
||||
${BIN}/issue-tmpl-gen ${OUTPUT_ISSUE_FILE}
|
||||
|
|
|
@ -14,9 +14,9 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
ARK_ROOT=$(dirname ${BASH_SOURCE})/..
|
||||
VELERO_ROOT=$(dirname ${BASH_SOURCE})/..
|
||||
HACK_DIR=$(dirname "${BASH_SOURCE}")
|
||||
ISSUE_TEMPLATE_FILE=${ARK_ROOT}/.github/ISSUE_TEMPLATE/bug_report.md
|
||||
ISSUE_TEMPLATE_FILE=${VELERO_ROOT}/.github/ISSUE_TEMPLATE/bug_report.md
|
||||
OUT_TMP_FILE="$(mktemp -d)"/bug_report.md
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,249 @@
|
|||
/*
|
||||
Copyright 2017 the Heptio Ark contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// BackupSpec defines the specification for a Velero backup.
|
||||
type BackupSpec struct {
|
||||
// IncludedNamespaces is a slice of namespace names to include objects
|
||||
// from. If empty, all namespaces are included.
|
||||
IncludedNamespaces []string `json:"includedNamespaces"`
|
||||
|
||||
// ExcludedNamespaces contains a list of namespaces that are not
|
||||
// included in the backup.
|
||||
ExcludedNamespaces []string `json:"excludedNamespaces"`
|
||||
|
||||
// IncludedResources is a slice of resource names to include
|
||||
// in the backup. If empty, all resources are included.
|
||||
IncludedResources []string `json:"includedResources"`
|
||||
|
||||
// ExcludedResources is a slice of resource names that are not
|
||||
// included in the backup.
|
||||
ExcludedResources []string `json:"excludedResources"`
|
||||
|
||||
// LabelSelector is a metav1.LabelSelector to filter with
|
||||
// when adding individual objects to the backup. If empty
|
||||
// or nil, all objects are included. Optional.
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
|
||||
|
||||
// SnapshotVolumes specifies whether to take cloud snapshots
|
||||
// of any PV's referenced in the set of objects included
|
||||
// in the Backup.
|
||||
SnapshotVolumes *bool `json:"snapshotVolumes,omitempty"`
|
||||
|
||||
// TTL is a time.Duration-parseable string describing how long
|
||||
// the Backup should be retained for.
|
||||
TTL metav1.Duration `json:"ttl"`
|
||||
|
||||
// IncludeClusterResources specifies whether cluster-scoped resources
|
||||
// should be included for consideration in the backup.
|
||||
IncludeClusterResources *bool `json:"includeClusterResources"`
|
||||
|
||||
// Hooks represent custom behaviors that should be executed at different phases of the backup.
|
||||
Hooks BackupHooks `json:"hooks"`
|
||||
|
||||
// StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored.
|
||||
StorageLocation string `json:"storageLocation"`
|
||||
|
||||
// VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup.
|
||||
VolumeSnapshotLocations []string `json:"volumeSnapshotLocations"`
|
||||
}
|
||||
|
||||
// BackupHooks contains custom behaviors that should be executed at different phases of the backup.
|
||||
type BackupHooks struct {
|
||||
// Resources are hooks that should be executed when backing up individual instances of a resource.
|
||||
Resources []BackupResourceHookSpec `json:"resources"`
|
||||
}
|
||||
|
||||
// BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on
|
||||
// the rules defined for namespaces, resources, and label selector.
|
||||
type BackupResourceHookSpec struct {
|
||||
// Name is the name of this hook.
|
||||
Name string `json:"name"`
|
||||
// IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies
|
||||
// to all namespaces.
|
||||
IncludedNamespaces []string `json:"includedNamespaces"`
|
||||
// ExcludedNamespaces specifies the namespaces to which this hook spec does not apply.
|
||||
ExcludedNamespaces []string `json:"excludedNamespaces"`
|
||||
// IncludedResources specifies the resources to which this hook spec applies. If empty, it applies
|
||||
// to all resources.
|
||||
IncludedResources []string `json:"includedResources"`
|
||||
// ExcludedResources specifies the resources to which this hook spec does not apply.
|
||||
ExcludedResources []string `json:"excludedResources"`
|
||||
// LabelSelector, if specified, filters the resources to which this hook spec applies.
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
// Hooks is a list of BackupResourceHooks to execute. DEPRECATED. Replaced by PreHooks.
|
||||
Hooks []BackupResourceHook `json:"hooks"`
|
||||
// PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup.
|
||||
// These are executed before any "additional items" from item actions are processed.
|
||||
PreHooks []BackupResourceHook `json:"pre,omitempty"`
|
||||
// PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup.
|
||||
// These are executed after all "additional items" from item actions are processed.
|
||||
PostHooks []BackupResourceHook `json:"post,omitempty"`
|
||||
}
|
||||
|
||||
// BackupResourceHook defines a hook for a resource.
|
||||
type BackupResourceHook struct {
|
||||
// Exec defines an exec hook.
|
||||
Exec *ExecHook `json:"exec"`
|
||||
}
|
||||
|
||||
// ExecHook is a hook that uses the pod exec API to execute a command in a container in a pod.
|
||||
type ExecHook struct {
|
||||
// Container is the container in the pod where the command should be executed. If not specified,
|
||||
// the pod's first container is used.
|
||||
Container string `json:"container"`
|
||||
// Command is the command and arguments to execute.
|
||||
Command []string `json:"command"`
|
||||
// OnError specifies how Velero should behave if it encounters an error executing this hook.
|
||||
OnError HookErrorMode `json:"onError"`
|
||||
// Timeout defines the maximum amount of time Velero should wait for the hook to complete before
|
||||
// considering the execution a failure.
|
||||
Timeout metav1.Duration `json:"timeout"`
|
||||
}
|
||||
|
||||
// HookErrorMode defines how Velero should treat an error from a hook.
|
||||
type HookErrorMode string
|
||||
|
||||
const (
|
||||
// HookErrorModeContinue means that an error from a hook is acceptable, and the backup can
|
||||
// proceed.
|
||||
HookErrorModeContinue HookErrorMode = "Continue"
|
||||
// HookErrorModeFail means that an error from a hook is problematic, and the backup should be in
|
||||
// error.
|
||||
HookErrorModeFail HookErrorMode = "Fail"
|
||||
)
|
||||
|
||||
// BackupPhase is a string representation of the lifecycle phase
|
||||
// of a Velero backup.
|
||||
type BackupPhase string
|
||||
|
||||
const (
|
||||
// BackupPhaseNew means the backup has been created but not
|
||||
// yet processed by the BackupController.
|
||||
BackupPhaseNew BackupPhase = "New"
|
||||
|
||||
// BackupPhaseFailedValidation means the backup has failed
|
||||
// the controller's validations and therefore will not run.
|
||||
BackupPhaseFailedValidation BackupPhase = "FailedValidation"
|
||||
|
||||
// BackupPhaseInProgress means the backup is currently executing.
|
||||
BackupPhaseInProgress BackupPhase = "InProgress"
|
||||
|
||||
// BackupPhaseCompleted means the backup has run successfully without
|
||||
// errors.
|
||||
BackupPhaseCompleted BackupPhase = "Completed"
|
||||
|
||||
// BackupPhaseFailed means the backup ran but encountered an error that
|
||||
// prevented it from completing successfully.
|
||||
BackupPhaseFailed BackupPhase = "Failed"
|
||||
|
||||
// BackupPhaseDeleting means the backup and all its associated data are being deleted.
|
||||
BackupPhaseDeleting BackupPhase = "Deleting"
|
||||
)
|
||||
|
||||
// BackupStatus captures the current status of a Velero backup.
|
||||
type BackupStatus struct {
|
||||
// Version is the backup format version.
|
||||
Version int `json:"version"`
|
||||
|
||||
// Expiration is when this Backup is eligible for garbage-collection.
|
||||
Expiration metav1.Time `json:"expiration"`
|
||||
|
||||
// Phase is the current state of the Backup.
|
||||
Phase BackupPhase `json:"phase"`
|
||||
|
||||
// VolumeBackups is a map of PersistentVolume names to
|
||||
// information about the backed-up volume in the cloud
|
||||
// provider API.
|
||||
//
|
||||
// Deprecated: this field is considered read-only as of v0.10
|
||||
// and will be removed in a subsequent release. The information
|
||||
// previously contained here is now stored in a file in backup
|
||||
// storage.
|
||||
VolumeBackups map[string]*VolumeBackupInfo `json:"volumeBackups,omitempty"`
|
||||
|
||||
// ValidationErrors is a slice of all validation errors (if
|
||||
// applicable).
|
||||
ValidationErrors []string `json:"validationErrors"`
|
||||
|
||||
// StartTimestamp records the time a backup was started.
|
||||
// Separate from CreationTimestamp, since that value changes
|
||||
// on restores.
|
||||
// The server's time is used for StartTimestamps
|
||||
StartTimestamp metav1.Time `json:"startTimestamp"`
|
||||
|
||||
// CompletionTimestamp records the time a backup was completed.
|
||||
// Completion time is recorded even on failed backups.
|
||||
// Completion time is recorded before uploading the backup object.
|
||||
// The server's time is used for CompletionTimestamps
|
||||
CompletionTimestamp metav1.Time `json:"completionTimestamp"`
|
||||
|
||||
// VolumeSnapshotsAttempted is the total number of attempted
|
||||
// volume snapshots for this backup.
|
||||
VolumeSnapshotsAttempted int `json:"volumeSnapshotsAttempted"`
|
||||
|
||||
// VolumeSnapshotsCompleted is the total number of successfully
|
||||
// completed volume snapshots for this backup.
|
||||
VolumeSnapshotsCompleted int `json:"volumeSnapshotsCompleted"`
|
||||
}
|
||||
|
||||
// VolumeBackupInfo captures the required information about
|
||||
// a PersistentVolume at backup time to be able to restore
|
||||
// it later.
|
||||
type VolumeBackupInfo struct {
|
||||
// SnapshotID is the ID of the snapshot taken in the cloud
|
||||
// provider API of this volume.
|
||||
SnapshotID string `json:"snapshotID"`
|
||||
|
||||
// Type is the type of the disk/volume in the cloud provider
|
||||
// API.
|
||||
Type string `json:"type"`
|
||||
|
||||
// AvailabilityZone is where the volume is provisioned
|
||||
// in the cloud provider.
|
||||
AvailabilityZone string `json:"availabilityZone,omitempty"`
|
||||
|
||||
// Iops is the optional value of provisioned IOPS for the
|
||||
// disk/volume in the cloud provider API.
|
||||
Iops *int64 `json:"iops,omitempty"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// Backup is a Velero resource that represents the capture of Kubernetes
|
||||
// cluster state at a point in time (API objects and associated volume state).
|
||||
type Backup struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
Spec BackupSpec `json:"spec"`
|
||||
Status BackupStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// BackupList is a list of Backups.
|
||||
type BackupList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []Backup `json:"items"`
|
||||
}
|
|
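As a rough illustration of how these fields surface in the API, a minimal Backup manifest might look like the following sketch (the names and values are placeholders):

```bash
# A sketch of a Backup custom resource exercising a few BackupSpec fields
kubectl apply -f - <<'EOF'
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: nginx-backup
  namespace: velero
spec:
  includedNamespaces:
  - nginx-example
  snapshotVolumes: true
  ttl: 720h0m0s
  storageLocation: default
EOF
```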
@ -0,0 +1,99 @@
|
|||
/*
|
||||
Copyright 2018 the Heptio Ark contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// BackupStorageLocation is a location where Velero stores backup objects.
|
||||
type BackupStorageLocation struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata"`
|
||||
|
||||
Spec BackupStorageLocationSpec `json:"spec"`
|
||||
Status BackupStorageLocationStatus `json:"status"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// BackupStorageLocationList is a list of BackupStorageLocations.
|
||||
type BackupStorageLocationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []BackupStorageLocation `json:"items"`
|
||||
}
|
||||
|
||||
// StorageType represents the type of storage that a backup location uses.
|
||||
// ObjectStorage must be non-nil, since it is currently the only supported StorageType.
|
||||
type StorageType struct {
|
||||
ObjectStorage *ObjectStorageLocation `json:"objectStorage,omitempty"`
|
||||
}
|
||||
|
||||
// ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage.
|
||||
type ObjectStorageLocation struct {
|
||||
// Bucket is the bucket to use for object storage.
|
||||
Bucket string `json:"bucket"`
|
||||
|
||||
// Prefix is the path inside a bucket to use for Velero storage. Optional.
|
||||
Prefix string `json:"prefix"`
|
||||
}
|
||||
|
||||
// BackupStorageLocationSpec defines the specification for a Velero BackupStorageLocation.
|
||||
type BackupStorageLocationSpec struct {
|
||||
// Provider is the provider of the backup storage.
|
||||
Provider string `json:"provider"`
|
||||
|
||||
// Config is for provider-specific configuration fields.
|
||||
Config map[string]string `json:"config"`
|
||||
|
||||
StorageType `json:",inline"`
|
||||
}
|
||||
|
||||
// BackupStorageLocationPhase is the lifecycle phase of a Velero BackupStorageLocation.
|
||||
type BackupStorageLocationPhase string
|
||||
|
||||
const (
|
||||
// BackupStorageLocationPhaseAvailable means the location is available to read and write from.
|
||||
BackupStorageLocationPhaseAvailable BackupStorageLocationPhase = "Available"
|
||||
|
||||
// BackupStorageLocationPhaseUnavailable means the location is unavailable to read and write from.
|
||||
BackupStorageLocationPhaseUnavailable BackupStorageLocationPhase = "Unavailable"
|
||||
)
|
||||
|
||||
// BackupStorageLocationAccessMode represents the permissions for a BackupStorageLocation.
|
||||
type BackupStorageLocationAccessMode string
|
||||
|
||||
const (
|
||||
// BackupStorageLocationAccessModeReadOnly represents read-only access to a BackupStorageLocation.
|
||||
BackupStorageLocationAccessModeReadOnly BackupStorageLocationAccessMode = "ReadOnly"
|
||||
|
||||
// BackupStorageLocationAccessModeReadWrite represents read and write access to a BackupStorageLocation.
|
||||
BackupStorageLocationAccessModeReadWrite BackupStorageLocationAccessMode = "ReadWrite"
|
||||
)
|
||||
|
||||
// BackupStorageLocationStatus describes the current status of a Velero BackupStorageLocation.
|
||||
type BackupStorageLocationStatus struct {
|
||||
Phase BackupStorageLocationPhase `json:"phase,omitempty"`
|
||||
AccessMode BackupStorageLocationAccessMode `json:"accessMode,omitempty"`
|
||||
LastSyncedRevision types.UID `json:"lastSyncedRevision,omitempty"`
|
||||
LastSyncedTime metav1.Time `json:"lastSyncedTime,omitempty"`
|
||||
}
|
|
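As a quick orientation (editor's sketch, not part of this commit): a client could construct one of these objects in Go roughly as follows. The provider and bucket names are made up for illustration; DefaultNamespace comes from the constants file later in this diff.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
)

func main() {
	// Hypothetical location backed by an S3-compatible bucket.
	loc := &v1.BackupStorageLocation{
		ObjectMeta: metav1.ObjectMeta{Namespace: v1.DefaultNamespace, Name: "default"},
		Spec: v1.BackupStorageLocationSpec{
			Provider: "aws", // provider name chosen for illustration
			StorageType: v1.StorageType{
				ObjectStorage: &v1.ObjectStorageLocation{Bucket: "my-bucket", Prefix: "velero"},
			},
		},
	}
	// ObjectStorage is promoted from the embedded StorageType.
	fmt.Println(loc.Name, loc.Spec.ObjectStorage.Bucket)
}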
@ -0,0 +1,47 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

const (
	// DefaultNamespace is the Kubernetes namespace that is used by default for
	// the Velero server and API objects.
	DefaultNamespace = "velero"

	// ResourcesDir is a top-level directory expected in backups which contains sub-directories
	// for each resource type in the backup.
	ResourcesDir = "resources"

	// MetadataDir is a top-level directory expected in backups which contains
	// files that store metadata about the backup, such as the backup version.
	MetadataDir = "metadata"

	// RestoreLabelKey is the label key that's applied to all resources that
	// are created during a restore. This is applied for ease of identification
	// of restored resources. The value will be the restore's name.
	//
	// This label is DEPRECATED as of v0.10 and will be removed entirely as of
	// v1.0 and replaced with RestoreNameLabel ("velero.io/restore-name").
	RestoreLabelKey = "velero-restore"

	// ClusterScopedDir is the name of the directory containing cluster-scoped
	// resources within a Velero backup.
	ClusterScopedDir = "cluster"

	// NamespaceScopedDir is the name of the directory containing namespace-scoped
	// resources within a Velero backup.
	NamespaceScopedDir = "namespaces"
)
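Read together, ResourcesDir, ClusterScopedDir, and NamespaceScopedDir imply the on-disk layout of a backup. A minimal sketch of the object keys this suggests (the layout is inferred from the comments above, not stated in this file; the resource and namespace names are hypothetical):

package main

import (
	"fmt"
	"path"

	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
)

func main() {
	// A namespaced resource lands under resources/<type>/namespaces/<ns>/.
	fmt.Println(path.Join(v1.ResourcesDir, "pods", v1.NamespaceScopedDir, "nginx-example", "my-pod.json"))
	// A cluster-scoped resource lands under resources/<type>/cluster/.
	fmt.Println(path.Join(v1.ResourcesDir, "persistentvolumes", v1.ClusterScopedDir, "my-pv.json"))
}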
@ -0,0 +1,65 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// DeleteBackupRequestSpec is the specification for which backups to delete.
type DeleteBackupRequestSpec struct {
	BackupName string `json:"backupName"`
}

// DeleteBackupRequestPhase represents the lifecycle phase of a DeleteBackupRequest.
type DeleteBackupRequestPhase string

const (
	// DeleteBackupRequestPhaseNew means the DeleteBackupRequest has not been processed yet.
	DeleteBackupRequestPhaseNew DeleteBackupRequestPhase = "New"
	// DeleteBackupRequestPhaseInProgress means the DeleteBackupRequest is being processed.
	DeleteBackupRequestPhaseInProgress DeleteBackupRequestPhase = "InProgress"
	// DeleteBackupRequestPhaseProcessed means the DeleteBackupRequest has been processed.
	DeleteBackupRequestPhaseProcessed DeleteBackupRequestPhase = "Processed"
)

// DeleteBackupRequestStatus is the current status of a DeleteBackupRequest.
type DeleteBackupRequestStatus struct {
	// Phase is the current state of the DeleteBackupRequest.
	Phase DeleteBackupRequestPhase `json:"phase"`
	// Errors contains any errors that were encountered during the deletion process.
	Errors []string `json:"errors"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DeleteBackupRequest is a request to delete one or more backups.
type DeleteBackupRequest struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`

	Spec DeleteBackupRequestSpec `json:"spec"`
	Status DeleteBackupRequestStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DeleteBackupRequestList is a list of DeleteBackupRequests.
type DeleteBackupRequestList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items []DeleteBackupRequest `json:"items"`
}
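To make the lifecycle concrete, here is a hedged sketch (not from the diff) of creating a request for a single backup; a controller, not shown, is what advances Status.Phase from New through InProgress to Processed. All names are hypothetical.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
)

func main() {
	req := &v1.DeleteBackupRequest{
		ObjectMeta: metav1.ObjectMeta{Namespace: v1.DefaultNamespace, Name: "delete-nightly-20181201"},
		Spec:       v1.DeleteBackupRequestSpec{BackupName: "nightly-20181201"},
	}
	// Any deletion errors would accumulate in req.Status.Errors.
	fmt.Println(req.Spec.BackupName, len(req.Status.Errors))
}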
@ -0,0 +1,21 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package

// Package v1 is the v1 version of the API.
// +groupName=velero.io
package v1
@ -0,0 +1,89 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// DownloadRequestSpec is the specification for a download request.
type DownloadRequestSpec struct {
	// Target is what to download (e.g. logs for a backup).
	Target DownloadTarget `json:"target"`
}

// DownloadTargetKind represents what type of file to download.
type DownloadTargetKind string

const (
	DownloadTargetKindBackupLog DownloadTargetKind = "BackupLog"
	DownloadTargetKindBackupContents DownloadTargetKind = "BackupContents"
	DownloadTargetKindBackupVolumeSnapshots DownloadTargetKind = "BackupVolumeSnapshots"
	DownloadTargetKindRestoreLog DownloadTargetKind = "RestoreLog"
	DownloadTargetKindRestoreResults DownloadTargetKind = "RestoreResults"
)

// DownloadTarget is the specification for what kind of file to download, and the name of the
// resource with which it's associated.
type DownloadTarget struct {
	// Kind is the type of file to download.
	Kind DownloadTargetKind `json:"kind"`
	// Name is the name of the kubernetes resource with which the file is associated.
	Name string `json:"name"`
}

// DownloadRequestPhase represents the lifecycle phase of a DownloadRequest.
type DownloadRequestPhase string

const (
	// DownloadRequestPhaseNew means the DownloadRequest has not been processed by the
	// DownloadRequestController yet.
	DownloadRequestPhaseNew DownloadRequestPhase = "New"
	// DownloadRequestPhaseProcessed means the DownloadRequest has been processed by the
	// DownloadRequestController.
	DownloadRequestPhaseProcessed DownloadRequestPhase = "Processed"
)

// DownloadRequestStatus is the current status of a DownloadRequest.
type DownloadRequestStatus struct {
	// Phase is the current state of the DownloadRequest.
	Phase DownloadRequestPhase `json:"phase"`
	// DownloadURL contains the pre-signed URL for the target file.
	DownloadURL string `json:"downloadURL"`
	// Expiration is when this DownloadRequest expires and can be deleted by the system.
	Expiration metav1.Time `json:"expiration"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DownloadRequest is a request to download an artifact from backup object storage, such as a backup
// log file.
type DownloadRequest struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`

	Spec DownloadRequestSpec `json:"spec"`
	Status DownloadRequestStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DownloadRequestList is a list of DownloadRequests.
type DownloadRequestList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items []DownloadRequest `json:"items"`
}
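A sketch of the intended flow, under the assumptions implied by the fields above: a client creates a DownloadRequest naming a target, waits for the controller to mark it Processed, then fetches the pre-signed URL before it expires. The backup name is hypothetical.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
)

func main() {
	dr := &v1.DownloadRequest{
		ObjectMeta: metav1.ObjectMeta{Namespace: v1.DefaultNamespace, Name: "my-backup-log"},
		Spec: v1.DownloadRequestSpec{
			Target: v1.DownloadTarget{Kind: v1.DownloadTargetKindBackupLog, Name: "my-backup"},
		},
	}
	// After the controller sets Phase to Processed, DownloadURL is ready to use.
	if dr.Status.Phase == v1.DownloadRequestPhaseProcessed {
		fmt.Println(dr.Status.DownloadURL)
	}
}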
@ -0,0 +1,50 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

const (
	// BackupNameLabel is the label key used to identify a backup by name.
	BackupNameLabel = "velero.io/backup-name"

	// BackupUIDLabel is the label key used to identify a backup by uid.
	BackupUIDLabel = "velero.io/backup-uid"

	// RestoreNameLabel is the label key used to identify a restore by name.
	RestoreNameLabel = "velero.io/restore-name"

	// ScheduleNameLabel is the label key used to identify a schedule by name.
	ScheduleNameLabel = "velero.io/schedule-name"

	// RestoreUIDLabel is the label key used to identify a restore by uid.
	RestoreUIDLabel = "velero.io/restore-uid"

	// PodUIDLabel is the label key used to identify a pod by uid.
	PodUIDLabel = "velero.io/pod-uid"

	// PodVolumeOperationTimeoutAnnotation is the annotation key used to apply
	// a backup/restore-specific timeout value for pod volume operations (i.e.
	// restic backups/restores).
	PodVolumeOperationTimeoutAnnotation = "velero.io/pod-volume-timeout"

	// StorageLocationLabel is the label key used to identify the storage
	// location of a backup.
	StorageLocationLabel = "velero.io/storage-location"

	// ResticVolumeNamespaceLabel is the label key used to identify which
	// namespace a restic repository stores pod volume backups for.
	ResticVolumeNamespaceLabel = "velero.io/volume-namespace"
)
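These keys are plain strings, so they compose directly into label selectors. For example (a sketch; the restore name is hypothetical), everything created by a given restore can be listed with:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
)

func main() {
	opts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf("%s=%s", v1.RestoreNameLabel, "my-restore"),
	}
	fmt.Println(opts.LabelSelector) // velero.io/restore-name=my-restore
}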
@ -0,0 +1,91 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// PodVolumeBackupSpec is the specification for a PodVolumeBackup.
type PodVolumeBackupSpec struct {
	// Node is the name of the node that the Pod is running on.
	Node string `json:"node"`

	// Pod is a reference to the pod containing the volume to be backed up.
	Pod corev1api.ObjectReference `json:"pod"`

	// Volume is the name of the volume within the Pod to be backed up.
	Volume string `json:"volume"`

	// BackupStorageLocation is the name of the backup storage location
	// where the restic repository is stored.
	BackupStorageLocation string `json:"backupStorageLocation"`

	// RepoIdentifier is the restic repository identifier.
	RepoIdentifier string `json:"repoIdentifier"`

	// Tags are a map of key-value pairs that should be applied to the
	// volume backup as tags.
	Tags map[string]string `json:"tags"`
}

// PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup.
type PodVolumeBackupPhase string

const (
	PodVolumeBackupPhaseNew PodVolumeBackupPhase = "New"
	PodVolumeBackupPhaseInProgress PodVolumeBackupPhase = "InProgress"
	PodVolumeBackupPhaseCompleted PodVolumeBackupPhase = "Completed"
	PodVolumeBackupPhaseFailed PodVolumeBackupPhase = "Failed"
)

// PodVolumeBackupStatus is the current status of a PodVolumeBackup.
type PodVolumeBackupStatus struct {
	// Phase is the current state of the PodVolumeBackup.
	Phase PodVolumeBackupPhase `json:"phase"`

	// Path is the full path within the controller pod being backed up.
	Path string `json:"path"`

	// SnapshotID is the identifier for the snapshot of the pod volume.
	SnapshotID string `json:"snapshotID"`

	// Message is a message about the pod volume backup's status.
	Message string `json:"message"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodVolumeBackup represents a restic backup of a volume within a pod.
type PodVolumeBackup struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`

	Spec PodVolumeBackupSpec `json:"spec"`
	Status PodVolumeBackupStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodVolumeBackupList is a list of PodVolumeBackups.
type PodVolumeBackupList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items []PodVolumeBackup `json:"items"`
}
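The phase constants suggest a simple state machine. A sketch of how a consumer might branch on them (placeholder logic, not Velero's actual controller code):

package main

import (
	"fmt"

	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
)

// describe summarizes a PodVolumeBackup based on its phase.
func describe(pvb *v1.PodVolumeBackup) string {
	switch pvb.Status.Phase {
	case v1.PodVolumeBackupPhaseCompleted:
		return "done, snapshot " + pvb.Status.SnapshotID
	case v1.PodVolumeBackupPhaseFailed:
		return "failed: " + pvb.Status.Message
	case v1.PodVolumeBackupPhaseInProgress:
		return "running on node " + pvb.Spec.Node
	default:
		return "waiting to be picked up"
	}
}

func main() {
	fmt.Println(describe(&v1.PodVolumeBackup{}))
}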
@ -0,0 +1,80 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// PodVolumeRestoreSpec is the specification for a PodVolumeRestore.
type PodVolumeRestoreSpec struct {
	// Pod is a reference to the pod containing the volume to be restored.
	Pod corev1api.ObjectReference `json:"pod"`

	// Volume is the name of the volume within the Pod to be restored.
	Volume string `json:"volume"`

	// BackupStorageLocation is the name of the backup storage location
	// where the restic repository is stored.
	BackupStorageLocation string `json:"backupStorageLocation"`

	// RepoIdentifier is the restic repository identifier.
	RepoIdentifier string `json:"repoIdentifier"`

	// SnapshotID is the ID of the volume snapshot to be restored.
	SnapshotID string `json:"snapshotID"`
}

// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.
type PodVolumeRestorePhase string

const (
	PodVolumeRestorePhaseNew PodVolumeRestorePhase = "New"
	PodVolumeRestorePhaseInProgress PodVolumeRestorePhase = "InProgress"
	PodVolumeRestorePhaseCompleted PodVolumeRestorePhase = "Completed"
	PodVolumeRestorePhaseFailed PodVolumeRestorePhase = "Failed"
)

// PodVolumeRestoreStatus is the current status of a PodVolumeRestore.
type PodVolumeRestoreStatus struct {
	// Phase is the current state of the PodVolumeRestore.
	Phase PodVolumeRestorePhase `json:"phase"`

	// Message is a message about the pod volume restore's status.
	Message string `json:"message"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodVolumeRestore represents a restic restore of a volume within a pod.
type PodVolumeRestore struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`

	Spec PodVolumeRestoreSpec `json:"spec"`
	Status PodVolumeRestoreStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodVolumeRestoreList is a list of PodVolumeRestores.
type PodVolumeRestoreList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items []PodVolumeRestore `json:"items"`
}
@ -0,0 +1,83 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	// SchemeBuilder collects the scheme builder functions for the Velero API
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)

	// AddToScheme applies the SchemeBuilder functions to a specified scheme
	AddToScheme = SchemeBuilder.AddToScheme
)

// GroupName is the group name for the Velero API
const GroupName = "velero.io"

// SchemeGroupVersion is the GroupVersion for the Velero API
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}

// Resource gets a Velero GroupResource for a specified resource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

type typeInfo struct {
	PluralName string
	ItemType runtime.Object
	ItemListType runtime.Object
}

func newTypeInfo(pluralName string, itemType, itemListType runtime.Object) typeInfo {
	return typeInfo{
		PluralName: pluralName,
		ItemType: itemType,
		ItemListType: itemListType,
	}
}

// CustomResources returns a map of all custom resources within the Velero
// API group, keyed on Kind.
func CustomResources() map[string]typeInfo {
	return map[string]typeInfo{
		"Backup": newTypeInfo("backups", &Backup{}, &BackupList{}),
		"Restore": newTypeInfo("restores", &Restore{}, &RestoreList{}),
		"Schedule": newTypeInfo("schedules", &Schedule{}, &ScheduleList{}),
		"DownloadRequest": newTypeInfo("downloadrequests", &DownloadRequest{}, &DownloadRequestList{}),
		"DeleteBackupRequest": newTypeInfo("deletebackuprequests", &DeleteBackupRequest{}, &DeleteBackupRequestList{}),
		"PodVolumeBackup": newTypeInfo("podvolumebackups", &PodVolumeBackup{}, &PodVolumeBackupList{}),
		"PodVolumeRestore": newTypeInfo("podvolumerestores", &PodVolumeRestore{}, &PodVolumeRestoreList{}),
		"ResticRepository": newTypeInfo("resticrepositories", &ResticRepository{}, &ResticRepositoryList{}),
		"BackupStorageLocation": newTypeInfo("backupstoragelocations", &BackupStorageLocation{}, &BackupStorageLocationList{}),
		"VolumeSnapshotLocation": newTypeInfo("volumesnapshotlocations", &VolumeSnapshotLocation{}, &VolumeSnapshotLocationList{}),
		"ServerStatusRequest": newTypeInfo("serverstatusrequests", &ServerStatusRequest{}, &ServerStatusRequestList{}),
	}
}

func addKnownTypes(scheme *runtime.Scheme) error {
	for _, typeInfo := range CustomResources() {
		scheme.AddKnownTypes(SchemeGroupVersion, typeInfo.ItemType, typeInfo.ItemListType)
	}

	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
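Usage note (a sketch, not from the diff): a client or test harness registers all of these types into a runtime scheme via AddToScheme, and Resource is handy for building GroupResource values in error messages.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
)

func main() {
	// Register every Velero kind, including list types, into a fresh scheme.
	scheme := runtime.NewScheme()
	if err := v1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(v1.Resource("backups")) // backups.velero.io
}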
@ -0,0 +1,80 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ResticRepositorySpec is the specification for a ResticRepository.
type ResticRepositorySpec struct {
	// VolumeNamespace is the namespace this restic repository contains
	// pod volume backups for.
	VolumeNamespace string `json:"volumeNamespace"`

	// BackupStorageLocation is the name of the BackupStorageLocation
	// that should contain this repository.
	BackupStorageLocation string `json:"backupStorageLocation"`

	// ResticIdentifier is the full restic-compatible string for identifying
	// this repository.
	ResticIdentifier string `json:"resticIdentifier"`

	// MaintenanceFrequency is how often maintenance should be run.
	MaintenanceFrequency metav1.Duration `json:"maintenanceFrequency"`
}

// ResticRepositoryPhase represents the lifecycle phase of a ResticRepository.
type ResticRepositoryPhase string

const (
	ResticRepositoryPhaseNew ResticRepositoryPhase = "New"
	ResticRepositoryPhaseReady ResticRepositoryPhase = "Ready"
	ResticRepositoryPhaseNotReady ResticRepositoryPhase = "NotReady"
)

// ResticRepositoryStatus is the current status of a ResticRepository.
type ResticRepositoryStatus struct {
	// Phase is the current state of the ResticRepository.
	Phase ResticRepositoryPhase `json:"phase"`

	// Message is a message about the current status of the ResticRepository.
	Message string `json:"message"`

	// LastMaintenanceTime is the last time maintenance was run.
	LastMaintenanceTime metav1.Time `json:"lastMaintenanceTime"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResticRepository represents a restic repository that stores pod volume
// backups for a namespace.
type ResticRepository struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`

	Spec ResticRepositorySpec `json:"spec"`
	Status ResticRepositoryStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResticRepositoryList is a list of ResticRepositories.
type ResticRepositoryList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items []ResticRepository `json:"items"`
}
@ -0,0 +1,159 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// RestoreSpec defines the specification for a Velero restore.
type RestoreSpec struct {
	// BackupName is the unique name of the Velero backup to restore
	// from.
	BackupName string `json:"backupName"`

	// ScheduleName is the unique name of the Velero schedule to restore
	// from. If specified, and BackupName is empty, Velero will restore
	// from the most recent successful backup created from this schedule.
	ScheduleName string `json:"scheduleName,omitempty"`

	// IncludedNamespaces is a slice of namespace names to include objects
	// from. If empty, all namespaces are included.
	IncludedNamespaces []string `json:"includedNamespaces"`

	// ExcludedNamespaces contains a list of namespaces that are not
	// included in the restore.
	ExcludedNamespaces []string `json:"excludedNamespaces"`

	// IncludedResources is a slice of resource names to include
	// in the restore. If empty, all resources in the backup are included.
	IncludedResources []string `json:"includedResources"`

	// ExcludedResources is a slice of resource names that are not
	// included in the restore.
	ExcludedResources []string `json:"excludedResources"`

	// NamespaceMapping is a map of source namespace names
	// to target namespace names to restore into. Any source
	// namespaces not included in the map will be restored into
	// namespaces of the same name.
	NamespaceMapping map[string]string `json:"namespaceMapping"`

	// LabelSelector is a metav1.LabelSelector to filter with
	// when restoring individual objects from the backup. If empty
	// or nil, all objects are included. Optional.
	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`

	// RestorePVs specifies whether to restore all included
	// PVs from snapshot (via the cloudprovider).
	RestorePVs *bool `json:"restorePVs,omitempty"`

	// IncludeClusterResources specifies whether cluster-scoped resources
	// should be included for consideration in the restore. If null, defaults
	// to true.
	IncludeClusterResources *bool `json:"includeClusterResources,omitempty"`
}

// RestorePhase is a string representation of the lifecycle phase
// of a Velero restore
type RestorePhase string

const (
	// RestorePhaseNew means the restore has been created but not
	// yet processed by the RestoreController
	RestorePhaseNew RestorePhase = "New"

	// RestorePhaseFailedValidation means the restore has failed
	// the controller's validations and therefore will not run.
	RestorePhaseFailedValidation RestorePhase = "FailedValidation"

	// RestorePhaseInProgress means the restore is currently executing.
	RestorePhaseInProgress RestorePhase = "InProgress"

	// RestorePhaseCompleted means the restore has finished executing.
	// Any relevant warnings or errors will be captured in the Status.
	RestorePhaseCompleted RestorePhase = "Completed"

	// RestorePhaseFailed means the restore was unable to execute.
	// The failing error is recorded in status.FailureReason.
	RestorePhaseFailed RestorePhase = "Failed"
)

// RestoreStatus captures the current status of a Velero restore
type RestoreStatus struct {
	// Phase is the current state of the Restore
	Phase RestorePhase `json:"phase"`

	// ValidationErrors is a slice of all validation errors (if
	// applicable)
	ValidationErrors []string `json:"validationErrors"`

	// Warnings is a count of all warning messages that were generated during
	// execution of the restore. The actual warnings are stored in object storage.
	Warnings int `json:"warnings"`

	// Errors is a count of all error messages that were generated during
	// execution of the restore. The actual errors are stored in object storage.
	Errors int `json:"errors"`

	// FailureReason is an error that caused the entire restore to fail.
	FailureReason string `json:"failureReason"`
}

// RestoreResult is a collection of messages that were generated
// during execution of a restore. This will typically store either
// warning or error messages.
type RestoreResult struct {
	// Ark is a slice of messages related to the operation of Ark
	// itself (for example, messages related to connecting to the
	// cloud, reading a backup file, etc.)
	// TODO(1.0) Remove this field. Currently maintained for backwards compatibility.
	Ark []string `json:"ark,omitempty"`

	// Velero is a slice of messages related to the operation of Velero
	// itself (for example, messages related to connecting to the
	// cloud, reading a backup file, etc.)
	Velero []string `json:"velero,omitempty"`

	// Cluster is a slice of messages related to restoring cluster-
	// scoped resources.
	Cluster []string `json:"cluster,omitempty"`

	// Namespaces is a map of namespace name to slice of messages
	// related to restoring namespace-scoped resources.
	Namespaces map[string][]string `json:"namespaces,omitempty"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Restore is a Velero resource that represents the application of
// resources from a Velero backup to a target Kubernetes cluster.
type Restore struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`

	Spec RestoreSpec `json:"spec"`
	Status RestoreStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RestoreList is a list of Restores.
type RestoreList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items []Restore `json:"items"`
}
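A hedged sketch (not part of the commit) of how the spec fields combine: the restore below pulls one namespace out of a named backup, remaps it into a new namespace, and forces PV restoration. All names are hypothetical.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
)

func main() {
	restorePVs := true
	r := &v1.Restore{
		ObjectMeta: metav1.ObjectMeta{Namespace: v1.DefaultNamespace, Name: "my-restore"},
		Spec: v1.RestoreSpec{
			BackupName:         "nightly-20181201",
			IncludedNamespaces: []string{"nginx-example"},
			NamespaceMapping:   map[string]string{"nginx-example": "nginx-restored"},
			RestorePVs:         &restorePVs, // pointer distinguishes "unset" from "false"
		},
	}
	fmt.Println(r.Spec.NamespaceMapping["nginx-example"]) // nginx-restored
}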
@ -0,0 +1,84 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// ScheduleSpec defines the specification for a Velero schedule
type ScheduleSpec struct {
	// Template is the definition of the Backup to be run
	// on the provided schedule
	Template BackupSpec `json:"template"`

	// Schedule is a Cron expression defining when to run
	// the Backup.
	Schedule string `json:"schedule"`
}

// SchedulePhase is a string representation of the lifecycle phase
// of a Velero schedule
type SchedulePhase string

const (
	// SchedulePhaseNew means the schedule has been created but not
	// yet processed by the ScheduleController
	SchedulePhaseNew SchedulePhase = "New"

	// SchedulePhaseEnabled means the schedule has been validated and
	// will now be triggering backups according to the schedule spec.
	SchedulePhaseEnabled SchedulePhase = "Enabled"

	// SchedulePhaseFailedValidation means the schedule has failed
	// the controller's validations and therefore will not trigger backups.
	SchedulePhaseFailedValidation SchedulePhase = "FailedValidation"
)

// ScheduleStatus captures the current state of a Velero schedule
type ScheduleStatus struct {
	// Phase is the current phase of the Schedule
	Phase SchedulePhase `json:"phase"`

	// LastBackup is the last time a Backup was run for this
	// Schedule
	LastBackup metav1.Time `json:"lastBackup"`

	// ValidationErrors is a slice of all validation errors (if
	// applicable)
	ValidationErrors []string `json:"validationErrors"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Schedule is a Velero resource that represents a pre-scheduled or
// periodic Backup that should be run.
type Schedule struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`

	Spec ScheduleSpec `json:"spec"`
	Status ScheduleStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ScheduleList is a list of Schedules.
type ScheduleList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items []Schedule `json:"items"`
}
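Since Spec.Schedule is a cron expression, a nightly backup schedule looks roughly like this (a sketch; the backup template is left empty for brevity):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
)

func main() {
	s := &v1.Schedule{
		ObjectMeta: metav1.ObjectMeta{Namespace: v1.DefaultNamespace, Name: "nightly"},
		Spec: v1.ScheduleSpec{
			Schedule: "0 1 * * *", // standard cron: 01:00 every day
			Template: v1.BackupSpec{},
		},
	}
	fmt.Println(s.Spec.Schedule)
}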
@ -0,0 +1,68 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServerStatusRequest is a request to access current status information about
// the Velero server.
type ServerStatusRequest struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`

	Spec ServerStatusRequestSpec `json:"spec"`
	Status ServerStatusRequestStatus `json:"status,omitempty"`
}

// ServerStatusRequestSpec is the specification for a ServerStatusRequest.
type ServerStatusRequestSpec struct{}

// ServerStatusRequestPhase represents the lifecycle phase of a ServerStatusRequest.
type ServerStatusRequestPhase string

const (
	// ServerStatusRequestPhaseNew means the ServerStatusRequest has not been processed yet.
	ServerStatusRequestPhaseNew ServerStatusRequestPhase = "New"
	// ServerStatusRequestPhaseProcessed means the ServerStatusRequest has been processed.
	ServerStatusRequestPhaseProcessed ServerStatusRequestPhase = "Processed"
)

// ServerStatusRequestStatus is the current status of a ServerStatusRequest.
type ServerStatusRequestStatus struct {
	// Phase is the current lifecycle phase of the ServerStatusRequest.
	Phase ServerStatusRequestPhase `json:"phase"`

	// ProcessedTimestamp is when the ServerStatusRequest was processed
	// by the ServerStatusRequestController.
	ProcessedTimestamp metav1.Time `json:"processedTimestamp"`

	// ServerVersion is the Velero server version.
	ServerVersion string `json:"serverVersion"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServerStatusRequestList is a list of ServerStatusRequests.
type ServerStatusRequestList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items []ServerStatusRequest `json:"items"`
}
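A sketch of the intent (not from the diff): a client creates an empty-spec request and reads back the server's version once the controller marks it Processed. The request name is hypothetical.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
)

func main() {
	req := &v1.ServerStatusRequest{
		ObjectMeta: metav1.ObjectMeta{Namespace: v1.DefaultNamespace, Name: "status-check"},
	}
	// The controller fills in Status once it sees the request.
	if req.Status.Phase == v1.ServerStatusRequestPhaseProcessed {
		fmt.Println("server version:", req.Status.ServerVersion)
	}
}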
@ -0,0 +1,65 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// VolumeSnapshotLocation is a location where Velero stores volume snapshots.
type VolumeSnapshotLocation struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata"`

	Spec VolumeSnapshotLocationSpec `json:"spec"`
	Status VolumeSnapshotLocationStatus `json:"status"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// VolumeSnapshotLocationList is a list of VolumeSnapshotLocations.
type VolumeSnapshotLocationList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items []VolumeSnapshotLocation `json:"items"`
}

// VolumeSnapshotLocationSpec defines the specification for a Velero VolumeSnapshotLocation.
type VolumeSnapshotLocationSpec struct {
	// Provider is the provider of the volume storage.
	Provider string `json:"provider"`

	// Config is for provider-specific configuration fields.
	Config map[string]string `json:"config"`
}

// VolumeSnapshotLocationPhase is the lifecycle phase of a Velero VolumeSnapshotLocation.
type VolumeSnapshotLocationPhase string

const (
	// VolumeSnapshotLocationPhaseAvailable means the location is available to read and write from.
	VolumeSnapshotLocationPhaseAvailable VolumeSnapshotLocationPhase = "Available"

	// VolumeSnapshotLocationPhaseUnavailable means the location is unavailable to read and write from.
	VolumeSnapshotLocationPhaseUnavailable VolumeSnapshotLocationPhase = "Unavailable"
)

// VolumeSnapshotLocationStatus describes the current status of a Velero VolumeSnapshotLocation.
type VolumeSnapshotLocationStatus struct {
	Phase VolumeSnapshotLocationPhase `json:"phase,omitempty"`
}
File diff suppressed because it is too large
@ -32,17 +32,17 @@ import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	kuberrs "k8s.io/apimachinery/pkg/util/errors"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cloudprovider"
	"github.com/heptio/ark/pkg/discovery"
	"github.com/heptio/ark/pkg/podexec"
	"github.com/heptio/ark/pkg/restic"
	"github.com/heptio/ark/pkg/util/collections"
	kubeutil "github.com/heptio/ark/pkg/util/kube"
	api "github.com/heptio/velero/pkg/apis/velero/v1"
	"github.com/heptio/velero/pkg/client"
	"github.com/heptio/velero/pkg/cloudprovider"
	"github.com/heptio/velero/pkg/discovery"
	"github.com/heptio/velero/pkg/podexec"
	"github.com/heptio/velero/pkg/restic"
	"github.com/heptio/velero/pkg/util/collections"
	kubeutil "github.com/heptio/velero/pkg/util/kube"
)

// BackupVersion is the current backup version for Ark.
// BackupVersion is the current backup version for Velero.
const BackupVersion = 1

// Backupper performs backups.
@ -22,8 +22,8 @@ import (
	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/kuberesource"
	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
	"github.com/heptio/velero/pkg/kuberesource"
)

// backupPVAction inspects a PersistentVolumeClaim for the PersistentVolume
@ -24,9 +24,9 @@ import (
	corev1api "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/kuberesource"
	arktest "github.com/heptio/ark/pkg/util/test"
	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
	"github.com/heptio/velero/pkg/kuberesource"
	velerotest "github.com/heptio/velero/pkg/util/test"
)

func TestBackupPVAction(t *testing.T) {
@ -39,7 +39,7 @@ func TestBackupPVAction(t *testing.T) {
	backup := &v1.Backup{}

	a := NewBackupPVAction(arktest.NewLogger())
	a := NewBackupPVAction(velerotest.NewLogger())

	// no spec.volumeName should result in no error
	// and no additional items
|
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/discovery"
	"github.com/heptio/ark/pkg/podexec"
	"github.com/heptio/ark/pkg/restic"
	"github.com/heptio/ark/pkg/util/collections"
	kubeutil "github.com/heptio/ark/pkg/util/kube"
	"github.com/heptio/ark/pkg/util/logging"
	arktest "github.com/heptio/ark/pkg/util/test"
	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
	"github.com/heptio/velero/pkg/client"
	"github.com/heptio/velero/pkg/discovery"
	"github.com/heptio/velero/pkg/podexec"
	"github.com/heptio/velero/pkg/restic"
	"github.com/heptio/velero/pkg/util/collections"
	kubeutil "github.com/heptio/velero/pkg/util/kube"
	"github.com/heptio/velero/pkg/util/logging"
	velerotest "github.com/heptio/velero/pkg/util/test"
)

var (
@ -133,7 +133,7 @@ func TestResolveActions(t *testing.T) {
	{Resource: "bar"}: {Group: "anothergroup", Resource: "barnacles"},
	{Resource: "baz"}: {Group: "anothergroup", Resource: "bazaars"},
}
discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources)
discoveryHelper := velerotest.NewFakeDiscoveryHelper(false, resources)

actual, err := resolveActions(test.input, discoveryHelper)
gotError := err != nil
|
	{Resource: "bar"}: {Group: "anothergroup", Resource: "barnacles"},
	{Resource: "baz"}: {Group: "anothergroup", Resource: "bazaars"},
}
discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources)
discoveryHelper := velerotest.NewFakeDiscoveryHelper(false, resources)

actual := getResourceIncludesExcludes(discoveryHelper, test.includes, test.excludes)
@ -470,8 +470,8 @@ func TestBackup(t *testing.T) {
	Backup: test.backup,
}

discoveryHelper := &arktest.FakeDiscoveryHelper{
	Mapper: &arktest.FakeMapper{
discoveryHelper := &velerotest.FakeDiscoveryHelper{
	Mapper: &velerotest.FakeMapper{
		Resources: map[schema.GroupVersionResource]schema.GroupVersionResource{
			{Resource: "cm"}: {Group: "", Version: "v1", Resource: "configmaps"},
			{Resource: "csr"}: {Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"},
@ -485,9 +485,9 @@ func TestBackup(t *testing.T) {
	},
}

dynamicFactory := new(arktest.FakeDynamicFactory)
dynamicFactory := new(velerotest.FakeDynamicFactory)

podCommandExecutor := &arktest.MockPodCommandExecutor{}
podCommandExecutor := &velerotest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)

groupBackupperFactory := &mockGroupBackupperFactory{}
@ -540,7 +540,7 @@ func TestBackup(t *testing.T) {
func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
	groupBackupperFactory := &mockGroupBackupperFactory{}
	kb := &kubernetesBackupper{
		discoveryHelper: new(arktest.FakeDiscoveryHelper),
		discoveryHelper: new(velerotest.FakeDiscoveryHelper),
		groupBackupperFactory: groupBackupperFactory,
	}
@ -563,7 +563,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
		mock.Anything,
	).Return(&mockGroupBackupper{})

	assert.NoError(t, kb.Backup(arktest.NewLogger(), &Request{Backup: &v1.Backup{}}, &bytes.Buffer{}, nil, nil))
	assert.NoError(t, kb.Backup(velerotest.NewLogger(), &Request{Backup: &v1.Backup{}}, &bytes.Buffer{}, nil, nil))

	// mutate the cohabitatingResources map that was used in the first backup to simulate
	// the first backup process having done so.
@ -590,7 +590,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
		mock.Anything,
	).Return(&mockGroupBackupper{})

	assert.NoError(t, kb.Backup(arktest.NewLogger(), &Request{Backup: new(v1.Backup)}, new(bytes.Buffer), nil, nil))
	assert.NoError(t, kb.Backup(velerotest.NewLogger(), &Request{Backup: new(v1.Backup)}, new(bytes.Buffer), nil, nil))
	assert.NotEqual(t, firstCohabitatingResources, secondCohabitatingResources)
	for _, resource := range secondCohabitatingResources {
		assert.False(t, resource.seen)
@ -770,7 +770,7 @@ func TestGetResourceHook(t *testing.T) {
	{Resource: "bar"}: {Group: "anothergroup", Resource: "barnacles"},
	{Resource: "baz"}: {Group: "anothergroup", Resource: "bazaars"},
}
discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources)
discoveryHelper := velerotest.NewFakeDiscoveryHelper(false, resources)

actual, err := getResourceHook(test.hookSpec, discoveryHelper)
require.NoError(t, err)
@ -21,7 +21,7 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/heptio/ark/pkg/apis/ark/v1"
	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
)

// NewDeleteBackupRequest creates a DeleteBackupRequest for the backup identified by name and uid.
Some files were not shown because too many files have changed in this diff.