Merge pull request #1748 from nrb/fix-1743
Use custom namespace when creating Velero clients (pull/1761/head)

commit 3c8020e922

New file (release note):

```diff
@@ -0,0 +1 @@
+Use VELERO_NAMESPACE to determine what namespace Velero server is running in. For any v1.0 installations using a different namespace, the VELERO_NAMESPACE environment variable will need to be set to the correct namespace.
```
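In code terms, the lookup this note describes follows the precedence implemented in the factory diff further down. A simplified, self-contained sketch (assuming the exported `LoadConfig` and `ConfigKeyNamespace` helpers shown in that diff; `"velero"` stands in for `v1.DefaultNamespace`):

```go
package main

import (
	"fmt"
	"os"

	"github.com/heptio/velero/pkg/client"
)

func main() {
	// Env var is read first; a config-file key overrides it; the compiled-in
	// default is the fallback. A bound --namespace flag overrides all of these.
	ns := os.Getenv("VELERO_NAMESPACE")
	if config, err := client.LoadConfig(); err == nil {
		if v, ok := config[client.ConfigKeyNamespace]; ok {
			ns = v
		}
	}
	if ns == "" {
		ns = "velero" // the compiled-in default
	}
	fmt.Println(ns)
}
```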
The client `Config` helper gains explicit QPS and burst parameters:

```diff
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 the Velero contributors.
+Copyright 2017, 2019 the Velero contributors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -29,12 +29,20 @@ import (
 
 // Config returns a *rest.Config, using either the kubeconfig (if specified) or an in-cluster
 // configuration.
-func Config(kubeconfig, kubecontext, baseName string) (*rest.Config, error) {
+func Config(kubeconfig, kubecontext, baseName string, qps float32, burst int) (*rest.Config, error) {
 	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
 	loadingRules.ExplicitPath = kubeconfig
 	configOverrides := &clientcmd.ConfigOverrides{CurrentContext: kubecontext}
 	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
 	clientConfig, err := kubeConfig.ClientConfig()
 	if err != nil {
 		return nil, errors.WithStack(err)
 	}
 
+	// Only override client-go's defaults when explicit values were given.
+	if qps > 0.0 {
+		clientConfig.QPS = qps
+	}
+
+	if burst > 0 {
+		clientConfig.Burst = burst
+	}
```
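For illustration only, a caller passing explicit rate limits through the new parameters (values arbitrary; zero values preserve client-go's defaults, and the import path is assumed to follow the repository layout shown in the other imports):

```go
package main

import (
	"fmt"

	"github.com/heptio/velero/pkg/client"
)

func main() {
	// kubeconfig and context left empty: fall back to KUBECONFIG or in-cluster config.
	cfg, err := client.Config("", "", "velero", 100.0, 120)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("QPS=%v Burst=%v\n", cfg.QPS, cfg.Burst)
}
```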
The client `Factory` learns about the namespace, basename, and rate limits:

```diff
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 the Velero contributors.
+Copyright 2017, 2019 the Velero contributors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import (
 	"github.com/spf13/pflag"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
 
 	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
 	clientset "github.com/heptio/velero/pkg/generated/clientset/versioned"
@@ -42,6 +43,17 @@ type Factory interface {
 	// DynamicClient returns a Kubernetes dynamic client. It uses the following priority to specify the cluster
 	// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
 	DynamicClient() (dynamic.Interface, error)
+	// SetBasename changes the basename for an already-constructed client.
+	// This is useful for generating clients that require a different user-agent string below the root `velero`
+	// command, such as the server subcommand.
+	SetBasename(string)
+	// SetClientQPS sets the Queries Per Second for a client.
+	SetClientQPS(float32)
+	// SetClientBurst sets the Burst for a client.
+	SetClientBurst(int)
+	// ClientConfig returns a rest.Config struct used for client-go clients.
+	ClientConfig() (*rest.Config, error)
 	// Namespace returns the namespace which the Factory will create clients for.
 	Namespace() string
 }
@@ -51,6 +63,8 @@ type factory struct {
 	kubecontext string
 	baseName    string
 	namespace   string
+	clientQPS   float32
+	clientBurst int
 }
 
 // NewFactory returns a Factory.
@@ -60,12 +74,19 @@ func NewFactory(baseName string) Factory {
 		baseName: baseName,
 	}
 
+	f.namespace = os.Getenv("VELERO_NAMESPACE")
+
 	if config, err := LoadConfig(); err == nil {
-		f.namespace = config[ConfigKeyNamespace]
+		// Only override the namespace if the config key is set
+		if _, ok := config[ConfigKeyNamespace]; ok {
+			f.namespace = config[ConfigKeyNamespace]
+		}
 	} else {
 		fmt.Fprintf(os.Stderr, "WARNING: error retrieving namespace from config file: %v\n", err)
 	}
 
+	// We didn't get the namespace via env var or config file, so use the default.
+	// Command line flags will override when BindFlags is called.
+	if f.namespace == "" {
+		f.namespace = v1.DefaultNamespace
+	}
@@ -81,8 +102,12 @@ func (f *factory) BindFlags(flags *pflag.FlagSet) {
 	flags.AddFlagSet(f.flags)
 }
 
+func (f *factory) ClientConfig() (*rest.Config, error) {
+	return Config(f.kubeconfig, f.kubecontext, f.baseName, f.clientQPS, f.clientBurst)
+}
+
 func (f *factory) Client() (clientset.Interface, error) {
-	clientConfig, err := Config(f.kubeconfig, f.kubecontext, f.baseName)
+	clientConfig, err := f.ClientConfig()
 	if err != nil {
 		return nil, err
 	}
@@ -95,7 +120,7 @@ func (f *factory) Client() (clientset.Interface, error) {
 }
 
 func (f *factory) KubeClient() (kubernetes.Interface, error) {
-	clientConfig, err := Config(f.kubeconfig, f.kubecontext, f.baseName)
+	clientConfig, err := f.ClientConfig()
 	if err != nil {
 		return nil, err
 	}
@@ -108,11 +133,10 @@ func (f *factory) KubeClient() (kubernetes.Interface, error) {
 }
 
 func (f *factory) DynamicClient() (dynamic.Interface, error) {
-	clientConfig, err := Config(f.kubeconfig, f.kubecontext, f.baseName)
+	clientConfig, err := f.ClientConfig()
 	if err != nil {
 		return nil, err
 	}
 
 	dynamicClient, err := dynamic.NewForConfig(clientConfig)
 	if err != nil {
 		return nil, errors.WithStack(err)
@@ -120,6 +144,18 @@ func (f *factory) DynamicClient() (dynamic.Interface, error) {
 	return dynamicClient, nil
 }
 
+func (f *factory) SetBasename(name string) {
+	f.baseName = name
+}
+
+func (f *factory) SetClientQPS(qps float32) {
+	f.clientQPS = qps
+}
+
+func (f *factory) SetClientBurst(burst int) {
+	f.clientBurst = burst
+}
+
 func (f *factory) Namespace() string {
 	return f.namespace
 }
```
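Taken together, a subcommand can now retune a single factory before building any clients. A usage sketch under the API above (the basename and rate-limit values are illustrative):

```go
package main

import (
	"log"

	"github.com/heptio/velero/pkg/client"
)

func main() {
	f := client.NewFactory("velero") // namespace resolved from env var, config file, or default
	f.SetBasename("velero-server")   // distinct user agent below the root command
	f.SetClientQPS(100.0)            // illustrative values
	f.SetClientBurst(120)

	veleroClient, err := f.Client() // all settings flow through ClientConfig()
	if err != nil {
		log.Fatal(err)
	}
	_ = veleroClient
}
```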
New test file (@@ -0,0 +1,61 @@) exercising the namespace precedence:

```go
/*
Copyright 2019 the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client

import (
	"os"
	"testing"

	"github.com/spf13/pflag"
	"github.com/stretchr/testify/assert"
)

// TestFactory tests the client.Factory interface.
func TestFactory(t *testing.T) {
	// Velero client configuration is currently omitted due to requiring a
	// test filesystem in pkg/test. This causes an import cycle, as pkg/test
	// uses pkg/client's interfaces to implement fakes.

	// Env variable should set the namespace if no config or argument is used.
	os.Setenv("VELERO_NAMESPACE", "env-velero")
	f := NewFactory("velero")

	assert.Equal(t, "env-velero", f.Namespace())

	os.Unsetenv("VELERO_NAMESPACE")

	// Argument should change the namespace.
	f = NewFactory("velero")
	s := "flag-velero"
	flags := new(pflag.FlagSet)

	f.BindFlags(flags)

	flags.Parse([]string{"--namespace", s})

	assert.Equal(t, s, f.Namespace())

	// An argument overrides the env variable if both are set.
	os.Setenv("VELERO_NAMESPACE", "env-velero")
	f = NewFactory("velero")
	flags = new(pflag.FlagSet)

	f.BindFlags(flags)
	flags.Parse([]string{"--namespace", s})
	assert.Equal(t, s, f.Namespace())

	os.Unsetenv("VELERO_NAMESPACE")
}
```
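As a side note, on Go 1.17 and later the env-var juggling could use `t.Setenv`, which restores the variable automatically. A sketch (hypothetical test name; not part of this change):

```go
package client

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFactoryEnvNamespace(t *testing.T) {
	// t.Setenv restores the previous value automatically when the test ends.
	t.Setenv("VELERO_NAMESPACE", "env-velero")

	f := NewFactory("velero")
	assert.Equal(t, "env-velero", f.Namespace())
}
```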
The restic server command now receives the shared factory instead of building its own clients:

```diff
@@ -61,7 +61,8 @@ func NewServerCommand(f client.Factory) *cobra.Command {
 			logger := logging.DefaultLogger(logLevel, formatFlag.Parse())
 			logger.Infof("Starting Velero restic server %s (%s)", buildinfo.Version, buildinfo.FormattedGitSHA())
 
-			s, err := newResticServer(logger, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()))
+			f.SetBasename(fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()))
+			s, err := newResticServer(logger, f)
 			cmd.CheckError(err)
 
 			s.run()
@@ -87,20 +88,16 @@ type resticServer struct {
 	fileSystem filesystem.Interface
 }
 
-func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, error) {
-	clientConfig, err := client.Config("", "", baseName)
+func newResticServer(logger logrus.FieldLogger, factory client.Factory) (*resticServer, error) {
+
+	kubeClient, err := factory.KubeClient()
 	if err != nil {
 		return nil, err
 	}
 
-	kubeClient, err := kubernetes.NewForConfig(clientConfig)
+	veleroClient, err := factory.Client()
 	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	veleroClient, err := clientset.NewForConfig(clientConfig)
-	if err != nil {
-		return nil, errors.WithStack(err)
+		return nil, err
 	}
 
 	// use a stand-alone pod informer because we want to use a field selector to
@@ -123,7 +120,7 @@ func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, error) {
 	// to fully-encrypted backups and have unique keys per repository.
 	secretInformer := corev1informers.NewFilteredSecretInformer(
 		kubeClient,
-		os.Getenv("VELERO_NAMESPACE"),
+		factory.Namespace(),
 		0,
 		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
 		func(opts *metav1.ListOptions) {
@@ -136,7 +133,7 @@ func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, error) {
 	s := &resticServer{
 		kubeClient:            kubeClient,
 		veleroClient:          veleroClient,
-		veleroInformerFactory: informers.NewFilteredSharedInformerFactory(veleroClient, 0, os.Getenv("VELERO_NAMESPACE"), nil),
+		veleroInformerFactory: informers.NewFilteredSharedInformerFactory(veleroClient, 0, factory.Namespace(), nil),
 		kubeInformerFactory:   kubeinformers.NewSharedInformerFactory(kubeClient, 0),
 		podInformer:           podInformer,
 		secretInformer:        secretInformer,
```
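The net effect is that namespace lookups funnel through the factory instead of scattered `os.Getenv("VELERO_NAMESPACE")` calls. A hypothetical helper (not part of the diff; the package name is assumed) illustrating the pattern:

```go
package restic

import (
	"k8s.io/client-go/kubernetes"

	"github.com/heptio/velero/pkg/client"
)

// clientsAndNamespace is a hypothetical helper, not part of the diff:
// both the clients and the namespace now come from one factory, so the
// namespace is resolved in exactly one place.
func clientsAndNamespace(factory client.Factory) (kubernetes.Interface, string, error) {
	kubeClient, err := factory.KubeClient()
	if err != nil {
		return nil, "", err
	}
	// Previously read via os.Getenv("VELERO_NAMESPACE") at each call site.
	return kubeClient, factory.Namespace(), nil
}
```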
The main server command gets the same treatment; `getServerNamespace` and its filesystem probe are deleted entirely:

```diff
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 the Velero contributors.
+Copyright 2017, 2019 the Velero contributors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -19,7 +19,6 @@ package server
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"log"
 	"net/http"
 	"net/http/pprof"
@@ -32,7 +31,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubeerrs "k8s.io/apimachinery/pkg/util/errors"
@@ -127,7 +125,7 @@ type controllerRunInfo struct {
 	numWorkers int
 }
 
-func NewCommand() *cobra.Command {
+func NewCommand(f client.Factory) *cobra.Command {
 	var (
 		volumeSnapshotLocations = flag.NewMap().WithKeyValueDelimiter(":")
 		logLevelFlag            = logging.LogLevelFlag(logrus.InfoLevel)
@@ -171,25 +169,13 @@
 
 			logger.Infof("Starting Velero server %s (%s)", buildinfo.Version, buildinfo.FormattedGitSHA())
 
-			// NOTE: the namespace flag is bound to velero's persistent flags when the root velero command
-			// creates the client Factory and binds the Factory's flags. We're not using a Factory here in
-			// the server because the Factory gets its basename set at creation time, and the basename is
-			// used to construct the user-agent for clients. Also, the Factory's Namespace() method uses
-			// the client config file to determine the appropriate namespace to use, and that isn't
-			// applicable to the server (it uses the method directly below instead). We could potentially
-			// add a SetBasename() method to the Factory, and tweak how Namespace() works, if we wanted to
-			// have the server use the Factory.
-			namespaceFlag := c.Flag("namespace")
-			if namespaceFlag == nil {
-				cmd.CheckError(errors.New("unable to look up namespace flag"))
-			}
-			namespace := getServerNamespace(namespaceFlag)
-
 			if volumeSnapshotLocations.Data() != nil {
 				config.defaultVolumeSnapshotLocations = volumeSnapshotLocations.Data()
 			}
 
-			s, err := newServer(namespace, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()), config, logger)
+			f.SetBasename(fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()))
+
+			s, err := newServer(f, config, logger)
 			cmd.CheckError(err)
 
 			cmd.CheckError(s.run())
@@ -216,20 +202,6 @@
 	return command
 }
 
-func getServerNamespace(namespaceFlag *pflag.Flag) string {
-	if namespaceFlag.Changed {
-		return namespaceFlag.Value.String()
-	}
-
-	if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
-		if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
-			return ns
-		}
-	}
-
-	return api.DefaultNamespace
-}
-
 type server struct {
 	namespace      string
 	metricsAddress string
@@ -251,29 +223,30 @@ type server struct {
 	config serverConfig
 }
 
-func newServer(namespace, baseName string, config serverConfig, logger *logrus.Logger) (*server, error) {
-	clientConfig, err := client.Config("", "", baseName)
-	if err != nil {
-		return nil, err
-	}
-
+func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*server, error) {
 	if config.clientQPS < 0.0 {
 		return nil, errors.New("client-qps must be positive")
 	}
-	clientConfig.QPS = config.clientQPS
+	f.SetClientQPS(config.clientQPS)
 
 	if config.clientBurst <= 0 {
 		return nil, errors.New("client-burst must be positive")
 	}
-	clientConfig.Burst = config.clientBurst
+	f.SetClientBurst(config.clientBurst)
 
-	kubeClient, err := kubernetes.NewForConfig(clientConfig)
+	kubeClient, err := f.KubeClient()
 	if err != nil {
-		return nil, errors.WithStack(err)
+		return nil, err
 	}
 
-	veleroClient, err := clientset.NewForConfig(clientConfig)
+	veleroClient, err := f.Client()
 	if err != nil {
-		return nil, errors.WithStack(err)
+		return nil, err
 	}
 
+	dynamicClient, err := f.DynamicClient()
+	if err != nil {
+		return nil, err
+	}
+
 	pluginRegistry := clientmgmt.NewRegistry(config.pluginDir, logger, logger.Level)
@@ -285,22 +258,22 @@
 		return nil, err
 	}
 
-	dynamicClient, err := dynamic.NewForConfig(clientConfig)
+	ctx, cancelFunc := context.WithCancel(context.Background())
+
+	clientConfig, err := f.ClientConfig()
 	if err != nil {
 		return nil, err
 	}
 
-	ctx, cancelFunc := context.WithCancel(context.Background())
-
 	s := &server{
-		namespace:             namespace,
+		namespace:             f.Namespace(),
 		metricsAddress:        config.metricsAddress,
 		kubeClientConfig:      clientConfig,
 		kubeClient:            kubeClient,
 		veleroClient:          veleroClient,
 		discoveryClient:       veleroClient.Discovery(),
 		dynamicClient:         dynamicClient,
-		sharedInformerFactory: informers.NewSharedInformerFactoryWithOptions(veleroClient, 0, informers.WithNamespace(namespace)),
+		sharedInformerFactory: informers.NewSharedInformerFactoryWithOptions(veleroClient, 0, informers.WithNamespace(f.Namespace())),
 		ctx:                   ctx,
 		cancelFunc:            cancelFunc,
 		logger:                logger,
```
The root command now passes the factory to the server subcommand:

```diff
@@ -63,7 +63,7 @@ operations can also be performed as 'velero backup get' and 'velero schedule create'.
 		backup.NewCommand(f),
 		schedule.NewCommand(f),
 		restore.NewCommand(f),
-		server.NewCommand(),
+		server.NewCommand(f),
 		version.NewCommand(f),
 		get.NewCommand(f),
 		install.NewCommand(f),
```
The install-generated Deployment injects `VELERO_NAMESPACE` via the downward API:

```diff
@@ -144,6 +144,14 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment
 							Name:  "VELERO_SCRATCH_DIR",
 							Value: "/scratch",
 						},
+						{
+							Name: "VELERO_NAMESPACE",
+							ValueFrom: &corev1.EnvVarSource{
+								FieldRef: &corev1.ObjectFieldSelector{
+									FieldPath: "metadata.namespace",
+								},
+							},
+						},
 					},
 					Resources: c.resources,
 				},
```
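A hypothetical check (assuming the package context of the diff above; not part of the change) showing where the new downward-API entry lands in the generated spec, mirroring the updated test below:

```go
package install

import "fmt"

// printNamespaceEnv is a hypothetical helper, not part of the diff.
func printNamespaceEnv() {
	deploy := Deployment("velero")
	for _, e := range deploy.Spec.Template.Spec.Containers[0].Env {
		if e.Name == "VELERO_NAMESPACE" {
			// The kubelet resolves this to the pod's own namespace at runtime.
			fmt.Println(e.ValueFrom.FieldRef.FieldPath) // "metadata.namespace"
		}
	}
}
```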
Corresponding test updates (env indexes and counts shift by one for the new entry):

```diff
@@ -32,7 +32,7 @@ func TestDeployment(t *testing.T) {
 	assert.Equal(t, "--restore-only", deploy.Spec.Template.Spec.Containers[0].Args[1])
 
 	deploy = Deployment("velero", WithEnvFromSecretKey("my-var", "my-secret", "my-key"))
-	envSecret := deploy.Spec.Template.Spec.Containers[0].Env[1]
+	envSecret := deploy.Spec.Template.Spec.Containers[0].Env[2]
 	assert.Equal(t, "my-var", envSecret.Name)
 	assert.Equal(t, "my-secret", envSecret.ValueFrom.SecretKeyRef.LocalObjectReference.Name)
 	assert.Equal(t, "my-key", envSecret.ValueFrom.SecretKeyRef.Key)
@@ -42,6 +42,6 @@ func TestDeployment(t *testing.T) {
 	assert.Equal(t, corev1.PullIfNotPresent, deploy.Spec.Template.Spec.Containers[0].ImagePullPolicy)
 
 	deploy = Deployment("velero", WithSecret(true))
-	assert.Equal(t, 4, len(deploy.Spec.Template.Spec.Containers[0].Env))
+	assert.Equal(t, 5, len(deploy.Spec.Template.Spec.Containers[0].Env))
 	assert.Equal(t, 3, len(deploy.Spec.Template.Spec.Volumes))
 }
```
The docs table of contents gains the new upgrade page:

```diff
@@ -13,6 +13,8 @@ toc:
     subfolderitems:
       - page: Overview
         url: /install-overview
+      - page: Upgrade to 1.1
+        url: /upgrade-to-1.1
       - page: Quick start with in-cluster MinIO
         url: /get-started
       - page: Run on AWS
```
Deleted page (@@ -1,144 +0,0 @@) — the v1.0 upgrade instructions, removed in favor of the new page below. Its content, for reference:

# Upgrading to Velero 1.0

## Prerequisites
- Velero v0.11 installed. If you're not already on v0.11, see the [instructions for upgrading to v0.11][0]. **Upgrading directly from v0.10.x or earlier to v1.0 is not supported!**
- (Optional, but strongly recommended) Create a full copy of the object storage bucket(s) Velero is using. Part 1 of the upgrade procedure will modify the contents of the bucket, so we recommend creating a backup copy of it prior to upgrading.

## Instructions

### Part 1 - Rewrite Legacy Metadata

#### Overview

You need to replace legacy metadata in object storage with updated versions **for any backups that were originally taken with a version prior to v0.11 (i.e. when the project was named Ark)**. While Velero v0.11 is backwards-compatible with these legacy files, Velero v1.0 is not.

_If you're sure that you do not have any backups that were originally created prior to v0.11 (with Ark), you can proceed directly to Part 2._

We've added a CLI command to [Velero v0.11.1][1], `velero migrate-backups`, to help you with this. This command will:

- Replace `ark-backup.json` files in object storage with equivalent `velero-backup.json` files.
- Create `<backup-name>-volumesnapshots.json.gz` files in object storage if they don't already exist, containing snapshot metadata populated from the backups' `status.volumeBackups` field*.

_*Backups created prior to v0.10 stored snapshot metadata in the `status.volumeBackups` field, but it has subsequently been replaced with the `<backup-name>-volumesnapshots.json.gz` file._

#### Instructions

1. Download the [v0.11.1 release tarball][1] for your client platform.

1. Extract the tarball:

    ```bash
    tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
    ```

1. Move the `velero` binary from the Velero directory to somewhere in your PATH.

1. Scale down your existing Velero deployment:

    ```bash
    kubectl -n velero scale deployment/velero --replicas 0
    ```

1. Fetch Velero's credentials for accessing your object storage bucket and store them locally for use by `velero migrate-backups`:

    For AWS:

    ```bash
    export AWS_SHARED_CREDENTIALS_FILE=./velero-migrate-backups-credentials
    kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode > $AWS_SHARED_CREDENTIALS_FILE
    ```

    For Azure:

    ```bash
    export AZURE_SUBSCRIPTION_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_SUBSCRIPTION_ID}" | base64 --decode)
    export AZURE_TENANT_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_TENANT_ID}" | base64 --decode)
    export AZURE_CLIENT_ID=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_CLIENT_ID}" | base64 --decode)
    export AZURE_CLIENT_SECRET=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_CLIENT_SECRET}" | base64 --decode)
    export AZURE_RESOURCE_GROUP=$(kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.AZURE_RESOURCE_GROUP}" | base64 --decode)
    ```

    For GCP:

    ```bash
    export GOOGLE_APPLICATION_CREDENTIALS=./velero-migrate-backups-credentials
    kubectl -n velero get secret cloud-credentials -o jsonpath="{.data.cloud}" | base64 --decode > $GOOGLE_APPLICATION_CREDENTIALS
    ```

1. List all of your backup storage locations:

    ```bash
    velero backup-location get
    ```

1. For each backup storage location that you want to use with Velero 1.0, replace any legacy pre-v0.11 backup metadata with the equivalent current formats:

    ```bash
    # - BACKUP_LOCATION_NAME is the name of a backup location from the previous step, whose
    #   backup metadata will be updated in object storage
    # - SNAPSHOT_LOCATION_NAME is the name of the volume snapshot location that Velero should
    #   record volume snapshots as existing in (this is only relevant if you have backups that
    #   were originally taken with a pre-v0.10 Velero/Ark)
    velero migrate-backups \
        --backup-location <BACKUP_LOCATION_NAME> \
        --snapshot-location <SNAPSHOT_LOCATION_NAME>
    ```

1. Scale up your deployment:

    ```bash
    kubectl -n velero scale deployment/velero --replicas 1
    ```

1. Remove the local `velero` credentials:

    For AWS:

    ```bash
    rm $AWS_SHARED_CREDENTIALS_FILE
    unset AWS_SHARED_CREDENTIALS_FILE
    ```

    For Azure:

    ```bash
    unset AZURE_SUBSCRIPTION_ID
    unset AZURE_TENANT_ID
    unset AZURE_CLIENT_ID
    unset AZURE_CLIENT_SECRET
    unset AZURE_RESOURCE_GROUP
    ```

    For GCP:

    ```bash
    rm $GOOGLE_APPLICATION_CREDENTIALS
    unset GOOGLE_APPLICATION_CREDENTIALS
    ```

### Part 2 - Upgrade Components to Velero 1.0

#### Instructions

1. Download the [v1.0 release tarball][2] for your client platform.

1. Extract the tarball:

    ```bash
    tar -xvf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
    ```

1. Move the `velero` binary from the Velero directory to somewhere in your PATH, replacing any existing pre-1.0 `velero` binaries.

1. Update the image for the Velero deployment and daemon set (if applicable):

    ```bash
    kubectl -n velero set image deployment/velero velero=gcr.io/heptio-images/velero:v1.0.0
    kubectl -n velero set image daemonset/restic restic=gcr.io/heptio-images/velero:v1.0.0
    ```

[0]: https://velero.io/docs/v0.11.0/migrating-to-velero
[1]: https://github.com/heptio/velero/releases/tag/v0.11.1
[2]: https://github.com/heptio/velero/releases/tag/v1.0.0
New page (@@ -0,0 +1,25 @@):

# Upgrading to Velero 1.1

## Prerequisites
- [Velero v1.0][1] installed.

Velero v1.1 only requires user intervention if Velero is running in a namespace other than `velero`.

## Instructions

### Adding VELERO_NAMESPACE environment variable to the deployment

Previous versions of Velero's server detected the namespace in which it was running by inspecting the container's filesystem.
With v1.1, this is no longer the case, and the server must be made aware of the namespace it is running in with the `VELERO_NAMESPACE` environment variable.

`velero install` automatically sets this variable for new deployments, but existing installations will need to add it before upgrading.

You can use the following command to patch the deployment:

```bash
kubectl patch deployment/velero -n <YOUR_NAMESPACE> \
  --type='json' \
  -p='[{"op":"add","path":"/spec/template/spec/containers/0/env/0","value":{"name":"VELERO_NAMESPACE", "valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}}]'
```

[1]: https://github.com/heptio/velero/releases/tag/v1.0.0