Support running in any namespace

Add the ability for the Ark server to run in any namespace.

Add `ark client config get/set` for manipulating the new client
configuration file at $HOME/.config/ark/config.json. This file holds
client defaults, such as the Ark server's namespace, so the --namespace
flag does not have to be specified on every invocation.
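For example (the namespace value below is illustrative; "namespace" is currently the only key the client itself consumes):

    $ ark client config set namespace=heptio-ark
    $ ark client config get namespace
    namespace: heptio-ark
    $ cat $HOME/.config/ark/config.json
    {"namespace":"heptio-ark"}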

Add a --namespace flag to all client commands.
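For example, for a server running outside the default namespace (the namespace name here is illustrative):

    $ ark backup get --namespace backup-system
    $ ark restore get -n backup-system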

Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
pull/272/head
Andy Goldstein 2017-12-22 09:43:44 -05:00
parent d56b5f2b66
commit 816f14c0b4
35 changed files with 335 additions and 99 deletions

pkg/client/config.go

@ -0,0 +1,75 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"encoding/json"
"os"
"path/filepath"
"github.com/pkg/errors"
)
const (
ConfigKeyNamespace = "namespace"
)
// LoadConfig loads the Ark client configuration file and returns it as a map[string]string. If the
// file does not exist, an empty map is returned.
func LoadConfig() (map[string]string, error) {
fileName := configFileName()
_, err := os.Stat(fileName)
if os.IsNotExist(err) {
// If the file isn't there, just return an empty map
return map[string]string{}, nil
}
if err != nil {
// For any other Stat() error, return it
return nil, errors.WithStack(err)
}
configFile, err := os.Open(fileName)
if err != nil {
return nil, errors.WithStack(err)
}
defer configFile.Close()
var config map[string]string
if err := json.NewDecoder(configFile).Decode(&config); err != nil {
return nil, errors.WithStack(err)
}
return config, nil
}
// SaveConfig saves the passed in config map to the Ark client configuration file.
func SaveConfig(config map[string]string) error {
fileName := configFileName()
configFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
if err != nil {
return errors.WithStack(err)
}
defer configFile.Close()
return json.NewEncoder(configFile).Encode(&config)
}
func configFileName() string {
return filepath.Join(os.Getenv("HOME"), ".config", "ark", "config.json")
}
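A minimal sketch of how a caller could use this package; the surrounding program and the "heptio-ark" value are hypothetical, while LoadConfig, SaveConfig, ConfigKeyNamespace, and the import path come from the file above:

package main

import (
	"fmt"
	"log"

	"github.com/heptio/ark/pkg/client"
)

func main() {
	// LoadConfig returns an empty map when the config file does not exist yet.
	cfg, err := client.LoadConfig()
	if err != nil {
		log.Fatal(err)
	}

	// Record a namespace default; the value is illustrative.
	if cfg[client.ConfigKeyNamespace] == "" {
		cfg[client.ConfigKeyNamespace] = "heptio-ark"
	}

	// SaveConfig truncates and rewrites the file. Note that it expects
	// $HOME/.config/ark to already exist, since os.OpenFile does not
	// create parent directories.
	if err := client.SaveConfig(cfg); err != nil {
		log.Fatal(err)
	}

	fmt.Println("namespace default:", cfg[client.ConfigKeyNamespace])
}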


@ -17,30 +17,36 @@ limitations under the License.
package client
import (
"fmt"
"os"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"k8s.io/client-go/kubernetes"
"github.com/heptio/ark/pkg/apis/ark/v1"
clientset "github.com/heptio/ark/pkg/generated/clientset/versioned"
)
// Factory knows how to create an ArkClient and Kubernetes client.
type Factory interface {
// BindFlags binds common flags such as --kubeconfig to the passed-in FlagSet.
// BindFlags binds common flags (--kubeconfig, --namespace) to the passed-in FlagSet.
BindFlags(flags *pflag.FlagSet)
// Client returns an ArkClient. It uses the following priority to specify the cluster
// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
Client() (clientset.Interface, error)
// KubeClient returns a Kubernetes client. It uses the following priority to specify the cluster
// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
KubeClient() (kubernetes.Interface, error)
Namespace() string
}
type factory struct {
flags *pflag.FlagSet
kubeconfig string
baseName string
namespace string
}
// NewFactory returns a Factory.
@ -49,7 +55,19 @@ func NewFactory(baseName string) Factory {
flags: pflag.NewFlagSet("", pflag.ContinueOnError),
baseName: baseName,
}
if config, err := LoadConfig(); err == nil {
f.namespace = config[ConfigKeyNamespace]
} else {
fmt.Fprintf(os.Stderr, "WARNING: error retrieving namespace from config file: %v\n", err)
}
if f.namespace == "" {
f.namespace = v1.DefaultNamespace
}
f.flags.StringVar(&f.kubeconfig, "kubeconfig", "", "Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration")
f.flags.StringVarP(&f.namespace, "namespace", "n", f.namespace, "The namespace in which Ark should operate")
return f
}
@ -83,3 +101,7 @@ func (f *factory) KubeClient() (kubernetes.Interface, error) {
}
return kubeClient, nil
}
func (f *factory) Namespace() string {
return f.namespace
}


@ -23,6 +23,7 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd/cli/backup"
cliclient "github.com/heptio/ark/pkg/cmd/cli/client"
"github.com/heptio/ark/pkg/cmd/cli/create"
"github.com/heptio/ark/pkg/cmd/cli/delete"
"github.com/heptio/ark/pkg/cmd/cli/describe"
@ -63,6 +64,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.`
runplugin.NewCommand(),
plugin.NewCommand(f),
delete.NewCommand(f),
cliclient.NewCommand(),
)
// add the glog flags


@ -118,7 +118,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
backup := &api.Backup{
ObjectMeta: metav1.ObjectMeta{
Namespace: api.DefaultNamespace,
Namespace: f.Namespace(),
Name: o.Name,
Labels: o.Labels.Data(),
},


@ -23,7 +23,6 @@ import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/controller"
@ -55,7 +54,7 @@ func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
backupName := args[0]
err = arkClient.ArkV1().Backups(api.DefaultNamespace).Delete(backupName, nil)
err = arkClient.ArkV1().Backups(f.Namespace()).Delete(backupName, nil)
cmd.CheckError(err)
fmt.Printf("Backup %q deleted\n", backupName)


@ -42,12 +42,12 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
if len(args) > 0 {
backups = new(v1.BackupList)
for _, name := range args {
backup, err := arkClient.Ark().Backups(v1.DefaultNamespace).Get(name, metav1.GetOptions{})
backup, err := arkClient.Ark().Backups(f.Namespace()).Get(name, metav1.GetOptions{})
cmd.CheckError(err)
backups.Items = append(backups.Items, *backup)
}
} else {
backups, err = arkClient.ArkV1().Backups(v1.DefaultNamespace).List(listOptions)
backups, err = arkClient.ArkV1().Backups(f.Namespace()).List(listOptions)
cmd.CheckError(err)
}


@ -106,7 +106,7 @@ func (o *DownloadOptions) Run(c *cobra.Command, f client.Factory) error {
}
defer backupDest.Close()
err = downloadrequest.Stream(arkClient.ArkV1(), o.Name, v1.DownloadTargetKindBackupContents, backupDest, o.Timeout)
err = downloadrequest.Stream(arkClient.ArkV1(), f.Namespace(), o.Name, v1.DownloadTargetKindBackupContents, backupDest, o.Timeout)
if err != nil {
os.Remove(o.Output)
cmd.CheckError(err)


@ -44,12 +44,12 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command {
if len(args) > 0 {
backups = new(api.BackupList)
for _, name := range args {
backup, err := arkClient.Ark().Backups(api.DefaultNamespace).Get(name, metav1.GetOptions{})
backup, err := arkClient.Ark().Backups(f.Namespace()).Get(name, metav1.GetOptions{})
cmd.CheckError(err)
backups.Items = append(backups.Items, *backup)
}
} else {
backups, err = arkClient.ArkV1().Backups(api.DefaultNamespace).List(listOptions)
backups, err = arkClient.ArkV1().Backups(f.Namespace()).List(listOptions)
cmd.CheckError(err)
}


@ -44,7 +44,7 @@ func NewLogsCommand(f client.Factory) *cobra.Command {
arkClient, err := f.Client()
cmd.CheckError(err)
err = downloadrequest.Stream(arkClient.ArkV1(), args[0], v1.DownloadTargetKindBackupLog, os.Stdout, timeout)
err = downloadrequest.Stream(arkClient.ArkV1(), f.Namespace(), args[0], v1.DownloadTargetKindBackupLog, os.Stdout, timeout)
cmd.CheckError(err)
},
}


@ -1,5 +1,5 @@
/*
Copyright 2017 the Heptio Ark contributors.
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,16 +14,22 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package config
package client
import (
"github.com/heptio/ark/pkg/cmd/cli/client/config"
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
func NewGetCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{}
func NewCommand() *cobra.Command {
c := &cobra.Command{
Use: "client",
Short: "Ark client related commands",
}
c.AddCommand(
config.NewCommand(),
)
return c
}


@ -18,20 +18,17 @@ package config
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
func NewCommand(f client.Factory) *cobra.Command {
func NewCommand() *cobra.Command {
c := &cobra.Command{
Use: "config",
Short: "Work with config",
Long: "Work with config",
Short: "Get and set client configuration file values",
}
c.AddCommand(
NewGetCommand(f),
NewSetCommand(f),
NewGetCommand(),
NewSetCommand(),
)
return c


@ -0,0 +1,60 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"sort"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/spf13/cobra"
)
func NewGetCommand() *cobra.Command {
c := &cobra.Command{
Use: "get [KEY 1] [KEY 2] [...]",
Short: "Get client configuration file values",
Run: func(c *cobra.Command, args []string) {
config, err := client.LoadConfig()
cmd.CheckError(err)
if len(args) == 0 {
keys := make([]string, 0, len(config))
for key := range config {
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
fmt.Printf("%s: %s\n", key, config[key])
}
} else {
for _, key := range args {
value, found := config[key]
if !found {
value = "<NOT SET>"
}
fmt.Printf("%s: %s\n", key, value)
}
}
},
}
return c
}


@ -0,0 +1,62 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"fmt"
"os"
"strings"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func NewSetCommand() *cobra.Command {
c := &cobra.Command{
Use: "set KEY=VALUE [KEY=VALUE]...",
Short: "Set client configuration file values",
Run: func(c *cobra.Command, args []string) {
if len(args) < 1 {
cmd.CheckError(errors.Errorf("At least one KEY=VALUE argument is required"))
}
config, err := client.LoadConfig()
cmd.CheckError(err)
for _, arg := range args {
pair := strings.Split(arg, "=")
if len(pair) != 2 {
fmt.Fprintf(os.Stderr, "WARNING: invalid KEY=VALUE: %q\n", arg)
continue
}
key, value := pair[0], pair[1]
if value == "" {
delete(config, key)
} else {
config[key] = value
}
}
cmd.CheckError(client.SaveConfig(config))
},
}
return c
}
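Because an empty value deletes the key (per the loop above), a stored default can be cleared with, for example:

    $ ark client config set namespace=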


@ -1,29 +0,0 @@
/*
Copyright 2017 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
func NewSetCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{}
return c
}


@ -30,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
ark "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/flag"
@ -61,7 +60,7 @@ func NewAddCommand(f client.Factory) *cobra.Command {
cmd.CheckError(err)
}
arkDeploy, err := kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Get(arkDeployment, metav1.GetOptions{})
arkDeploy, err := kubeClient.AppsV1beta1().Deployments(f.Namespace()).Get(arkDeployment, metav1.GetOptions{})
if err != nil {
cmd.CheckError(err)
}
@ -131,7 +130,7 @@ func NewAddCommand(f client.Factory) *cobra.Command {
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
cmd.CheckError(err)
_, err = kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
_, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
cmd.CheckError(err)
},
}


@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
ark "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
)
@ -46,7 +45,7 @@ func NewRemoveCommand(f client.Factory) *cobra.Command {
cmd.CheckError(err)
}
arkDeploy, err := kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Get(arkDeployment, metav1.GetOptions{})
arkDeploy, err := kubeClient.AppsV1beta1().Deployments(f.Namespace()).Get(arkDeployment, metav1.GetOptions{})
if err != nil {
cmd.CheckError(err)
}
@ -78,7 +77,7 @@ func NewRemoveCommand(f client.Factory) *cobra.Command {
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
cmd.CheckError(err)
_, err = kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
_, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
cmd.CheckError(err)
},
}


@ -118,7 +118,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
restore := &api.Restore{
ObjectMeta: metav1.ObjectMeta{
Namespace: api.DefaultNamespace,
Namespace: f.Namespace(),
Name: fmt.Sprintf("%s-%s", o.BackupName, time.Now().Format("20060102150405")),
Labels: o.Labels.Data(),
},


@ -22,7 +22,6 @@ import (
"github.com/spf13/cobra"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
)
@ -42,7 +41,7 @@ func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
name := args[0]
err = arkClient.ArkV1().Restores(api.DefaultNamespace).Delete(name, nil)
err = arkClient.ArkV1().Restores(f.Namespace()).Delete(name, nil)
cmd.CheckError(err)
fmt.Printf("Restore %q deleted\n", name)


@ -42,12 +42,12 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
if len(args) > 0 {
restores = new(api.RestoreList)
for _, name := range args {
restore, err := arkClient.Ark().Restores(api.DefaultNamespace).Get(name, metav1.GetOptions{})
restore, err := arkClient.Ark().Restores(f.Namespace()).Get(name, metav1.GetOptions{})
cmd.CheckError(err)
restores.Items = append(restores.Items, *restore)
}
} else {
restores, err = arkClient.ArkV1().Restores(api.DefaultNamespace).List(listOptions)
restores, err = arkClient.ArkV1().Restores(f.Namespace()).List(listOptions)
cmd.CheckError(err)
}


@ -44,12 +44,12 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command {
if len(args) > 0 {
restores = new(api.RestoreList)
for _, name := range args {
restore, err := arkClient.Ark().Restores(api.DefaultNamespace).Get(name, metav1.GetOptions{})
restore, err := arkClient.Ark().Restores(f.Namespace()).Get(name, metav1.GetOptions{})
cmd.CheckError(err)
restores.Items = append(restores.Items, *restore)
}
} else {
restores, err = arkClient.ArkV1().Restores(api.DefaultNamespace).List(listOptions)
restores, err = arkClient.ArkV1().Restores(f.Namespace()).List(listOptions)
cmd.CheckError(err)
}


@ -44,7 +44,7 @@ func NewLogsCommand(f client.Factory) *cobra.Command {
arkClient, err := f.Client()
cmd.CheckError(err)
err = downloadrequest.Stream(arkClient.ArkV1(), args[0], v1.DownloadTargetKindRestoreLog, os.Stdout, timeout)
err = downloadrequest.Stream(arkClient.ArkV1(), f.Namespace(), args[0], v1.DownloadTargetKindRestoreLog, os.Stdout, timeout)
cmd.CheckError(err)
},
}


@ -93,7 +93,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
schedule := &api.Schedule{
ObjectMeta: metav1.ObjectMeta{
Namespace: api.DefaultNamespace,
Namespace: f.Namespace(),
Name: o.BackupOptions.Name,
},
Spec: api.ScheduleSpec{


@ -22,7 +22,6 @@ import (
"github.com/spf13/cobra"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
)
@ -42,7 +41,7 @@ func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
name := args[0]
err = arkClient.ArkV1().Schedules(api.DefaultNamespace).Delete(name, nil)
err = arkClient.ArkV1().Schedules(f.Namespace()).Delete(name, nil)
cmd.CheckError(err)
fmt.Printf("Schedule %q deleted\n", name)


@ -42,12 +42,12 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
if len(args) > 0 {
schedules = new(v1.ScheduleList)
for _, name := range args {
schedule, err := arkClient.Ark().Schedules(v1.DefaultNamespace).Get(name, metav1.GetOptions{})
schedule, err := arkClient.Ark().Schedules(f.Namespace()).Get(name, metav1.GetOptions{})
cmd.CheckError(err)
schedules.Items = append(schedules.Items, *schedule)
}
} else {
schedules, err = arkClient.ArkV1().Schedules(v1.DefaultNamespace).List(listOptions)
schedules, err = arkClient.ArkV1().Schedules(f.Namespace()).List(listOptions)
cmd.CheckError(err)
}


@ -44,12 +44,12 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command {
if len(args) > 0 {
schedules = new(api.ScheduleList)
for _, name := range args {
schedule, err := arkClient.Ark().Schedules(api.DefaultNamespace).Get(name, metav1.GetOptions{})
schedule, err := arkClient.Ark().Schedules(f.Namespace()).Get(name, metav1.GetOptions{})
cmd.CheckError(err)
schedules.Items = append(schedules.Items, *schedule)
}
} else {
schedules, err = arkClient.ArkV1().Schedules(api.DefaultNamespace).List(listOptions)
schedules, err = arkClient.ArkV1().Schedules(f.Namespace()).List(listOptions)
cmd.CheckError(err)
}


@ -19,6 +19,7 @@ package server
import (
"context"
"fmt"
"io/ioutil"
"reflect"
"sort"
"strings"
@ -29,6 +30,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@ -61,7 +63,6 @@ import (
func NewCommand() *cobra.Command {
var (
kubeconfig string
sortedLogLevels = getSortedLogLevels()
logLevelFlag = flag.NewEnum(logrus.InfoLevel.String(), sortedLogLevels...)
)
@ -86,7 +87,21 @@ func NewCommand() *cobra.Command {
logger := newLogger(logLevel, &logging.ErrorLocationHook{}, &logging.LogLocationHook{})
logger.Infof("Starting Ark server %s", buildinfo.FormattedGitSHA())
s, err := newServer(kubeconfig, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()), logger)
// NOTE: the namespace flag is bound to ark's persistent flags when the root ark command
// creates the client Factory and binds the Factory's flags. We're not using a Factory here in
// the server because the Factory gets its basename set at creation time, and the basename is
// used to construct the user-agent for clients. Also, the Factory's Namespace() method uses
// the client config file to determine the appropriate namespace to use, and that isn't
// applicable to the server (it uses the method directly below instead). We could potentially
// add a SetBasename() method to the Factory, and tweak how Namespace() works, if we wanted to
// have the server use the Factory.
namespaceFlag := c.Flag("namespace")
if namespaceFlag == nil {
cmd.CheckError(errors.New("unable to look up namespace flag"))
}
namespace := getServerNamespace(namespaceFlag)
s, err := newServer(namespace, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()), logger)
cmd.CheckError(err)
@ -95,11 +110,24 @@ func NewCommand() *cobra.Command {
}
command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(sortedLogLevels, ", ")))
command.Flags().StringVar(&kubeconfig, "kubeconfig", "", "Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration")
return command
}
func getServerNamespace(namespaceFlag *pflag.Flag) string {
if namespaceFlag.Changed {
return namespaceFlag.Value.String()
}
if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
return ns
}
}
return api.DefaultNamespace
}
func newLogger(level logrus.Level, hooks ...logrus.Hook) *logrus.Logger {
logger := logrus.New()
logger.Level = level
@ -132,6 +160,7 @@ func getSortedLogLevels() []string {
}
type server struct {
namespace string
kubeClientConfig *rest.Config
kubeClient kubernetes.Interface
arkClient clientset.Interface
@ -146,8 +175,8 @@ type server struct {
pluginManager plugin.Manager
}
func newServer(kubeconfig, baseName string, logger *logrus.Logger) (*server, error) {
clientConfig, err := client.Config(kubeconfig, baseName)
func newServer(namespace, baseName string, logger *logrus.Logger) (*server, error) {
clientConfig, err := client.Config("", baseName)
if err != nil {
return nil, err
}
@ -170,12 +199,13 @@ func newServer(kubeconfig, baseName string, logger *logrus.Logger) (*server, err
ctx, cancelFunc := context.WithCancel(context.Background())
s := &server{
namespace: namespace,
kubeClientConfig: clientConfig,
kubeClient: kubeClient,
arkClient: arkClient,
discoveryClient: arkClient.Discovery(),
clientPool: dynamic.NewDynamicClientPool(clientConfig),
sharedInformerFactory: informers.NewSharedInformerFactory(arkClient, 0),
sharedInformerFactory: informers.NewFilteredSharedInformerFactory(arkClient, 0, namespace, nil),
ctx: ctx,
cancelFunc: cancelFunc,
logger: logger,
@ -218,12 +248,12 @@ func (s *server) run() error {
}
func (s *server) ensureArkNamespace() error {
logContext := s.logger.WithField("namespace", api.DefaultNamespace)
logContext := s.logger.WithField("namespace", s.namespace)
logContext.Info("Ensuring namespace exists for backups")
defaultNamespace := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: api.DefaultNamespace,
Name: s.namespace,
},
}
@ -243,7 +273,7 @@ func (s *server) loadConfig() (*api.Config, error) {
err error
)
for {
config, err = s.arkClient.ArkV1().Configs(api.DefaultNamespace).Get("default", metav1.GetOptions{})
config, err = s.arkClient.ArkV1().Configs(s.namespace).Get("default", metav1.GetOptions{})
if err == nil {
break
}
@ -459,6 +489,7 @@ func (s *server) runControllers(config *api.Config) error {
}()
scheduleController := controller.NewScheduleController(
s.namespace,
s.arkClient.ArkV1(),
s.arkClient.ArkV1(),
s.sharedInformerFactory.Ark().V1().Schedules(),
@ -511,6 +542,7 @@ func (s *server) runControllers(config *api.Config) error {
cmd.CheckError(err)
restoreController := controller.NewRestoreController(
s.namespace,
s.sharedInformerFactory.Ark().V1().Restores(),
s.arkClient.ArkV1(),
s.arkClient.ArkV1(),


@ -33,10 +33,10 @@ import (
arkclientv1 "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
)
func Stream(client arkclientv1.DownloadRequestsGetter, name string, kind v1.DownloadTargetKind, w io.Writer, timeout time.Duration) error {
func Stream(client arkclientv1.DownloadRequestsGetter, namespace, name string, kind v1.DownloadTargetKind, w io.Writer, timeout time.Duration) error {
req := &v1.DownloadRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: v1.DefaultNamespace,
Namespace: namespace,
Name: fmt.Sprintf("%s-%s", name, time.Now().Format("20060102150405")),
},
Spec: v1.DownloadRequestSpec{
@ -47,18 +47,19 @@ func Stream(client arkclientv1.DownloadRequestsGetter, name string, kind v1.Down
},
}
req, err := client.DownloadRequests(v1.DefaultNamespace).Create(req)
req, err := client.DownloadRequests(namespace).Create(req)
if err != nil {
return errors.WithStack(err)
}
defer client.DownloadRequests(v1.DefaultNamespace).Delete(req.Name, nil)
defer client.DownloadRequests(namespace).Delete(req.Name, nil)
listOptions := metav1.ListOptions{
//TODO: once kube-apiserver http://issue.k8s.io/51046 is fixed, uncomment
// TODO: once the minimum supported Kubernetes version is v1.9.0, uncomment the following line.
// See http://issue.k8s.io/51046 for details.
//FieldSelector: "metadata.name=" + req.Name
ResourceVersion: req.ResourceVersion,
}
watcher, err := client.DownloadRequests(v1.DefaultNamespace).Watch(listOptions)
watcher, err := client.DownloadRequests(namespace).Watch(listOptions)
if err != nil {
return errors.WithStack(err)
}
@ -78,6 +79,8 @@ Loop:
return errors.Errorf("unexpected type %T", e.Object)
}
// TODO: once the minimum supported Kubernetes version is v1.9.0, remove the following check.
// See http://issue.k8s.io/51046 for details.
if updated.Name != req.Name {
continue
}


@ -150,7 +150,7 @@ func TestStream(t *testing.T) {
output := new(bytes.Buffer)
errCh := make(chan error)
go func() {
err := Stream(client.ArkV1(), "name", test.kind, output, timeout)
err := Stream(client.ArkV1(), "namespace", "name", test.kind, output, timeout)
errCh <- err
}()


@ -108,7 +108,7 @@ func describeRestoreResults(d *Describer, restore *v1.Restore, arkClient clients
var buf bytes.Buffer
var resultMap map[string]v1.RestoreResult
if err := downloadrequest.Stream(arkClient.ArkV1(), restore.Name, v1.DownloadTargetKindRestoreResults, &buf, 30*time.Second); err != nil {
if err := downloadrequest.Stream(arkClient.ArkV1(), restore.Namespace, restore.Name, v1.DownloadTargetKindRestoreResults, &buf, 30*time.Second); err != nil {
d.Printf("Warnings:\t<error getting warnings: %v>\n\nErrors:\t<error getting errors: %v>\n", err, err)
return
}


@ -297,7 +297,7 @@ func patchBackup(original, updated *api.Backup, client arkv1client.BackupsGetter
return nil, errors.Wrap(err, "error creating two-way merge patch for backup")
}
res, err := client.Backups(api.DefaultNamespace).Patch(original.Name, types.MergePatchType, patchBytes)
res, err := client.Backups(original.Namespace).Patch(original.Name, types.MergePatchType, patchBytes)
if err != nil {
return nil, errors.Wrap(err, "error patching backup")
}


@ -276,7 +276,7 @@ func patchDownloadRequest(original, updated *v1.DownloadRequest, client arkv1cli
return nil, errors.Wrap(err, "error creating two-way merge patch for download request")
}
res, err := client.DownloadRequests(v1.DefaultNamespace).Patch(original.Name, types.MergePatchType, patchBytes)
res, err := client.DownloadRequests(original.Namespace).Patch(original.Name, types.MergePatchType, patchBytes)
if err != nil {
return nil, errors.Wrap(err, "error patching download request")
}


@ -54,6 +54,7 @@ import (
var nonRestorableResources = []string{"nodes"}
type restoreController struct {
namespace string
restoreClient arkv1client.RestoresGetter
backupClient arkv1client.BackupsGetter
restorer restore.Restorer
@ -71,6 +72,7 @@ type restoreController struct {
}
func NewRestoreController(
namespace string,
restoreInformer informers.RestoreInformer,
restoreClient arkv1client.RestoresGetter,
backupClient arkv1client.BackupsGetter,
@ -83,6 +85,7 @@ func NewRestoreController(
pluginManager plugin.Manager,
) Interface {
c := &restoreController{
namespace: namespace,
restoreClient: restoreClient,
backupClient: backupClient,
restorer: restorer,
@ -319,7 +322,7 @@ func (controller *restoreController) getValidationErrors(itm *api.Restore) []str
}
func (controller *restoreController) fetchBackup(bucket, name string) (*api.Backup, error) {
backup, err := controller.backupLister.Backups(api.DefaultNamespace).Get(name)
backup, err := controller.backupLister.Backups(controller.namespace).Get(name)
if err == nil {
return backup, nil
}
@ -338,8 +341,10 @@ func (controller *restoreController) fetchBackup(bucket, name string) (*api.Back
// ResourceVersion needs to be cleared in order to create the object in the API
backup.ResourceVersion = ""
// Clear out the namespace too, just in case
backup.Namespace = ""
created, createErr := controller.backupClient.Backups(api.DefaultNamespace).Create(backup)
created, createErr := controller.backupClient.Backups(controller.namespace).Create(backup)
if createErr != nil {
logContext.WithError(errors.WithStack(createErr)).Error("Unable to create API object for Backup")
} else {
@ -495,7 +500,7 @@ func patchRestore(original, updated *api.Restore, client arkv1client.RestoresGet
return nil, errors.Wrap(err, "error creating two-way merge patch for restore")
}
res, err := client.Restores(api.DefaultNamespace).Patch(original.Name, types.MergePatchType, patchBytes)
res, err := client.Restores(original.Namespace).Patch(original.Name, types.MergePatchType, patchBytes)
if err != nil {
return nil, errors.Wrap(err, "error patching restore")
}


@ -82,6 +82,7 @@ func TestFetchBackup(t *testing.T) {
)
c := NewRestoreController(
api.DefaultNamespace,
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
client.ArkV1(),
@ -249,6 +250,7 @@ func TestProcessRestore(t *testing.T) {
defer backupSvc.AssertExpectations(t)
c := NewRestoreController(
api.DefaultNamespace,
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
client.ArkV1(),


@ -45,6 +45,7 @@ import (
)
type scheduleController struct {
namespace string
schedulesClient arkv1client.SchedulesGetter
backupsClient arkv1client.BackupsGetter
schedulesLister listers.ScheduleLister
@ -57,6 +58,7 @@ type scheduleController struct {
}
func NewScheduleController(
namespace string,
schedulesClient arkv1client.SchedulesGetter,
backupsClient arkv1client.BackupsGetter,
schedulesInformer informers.ScheduleInformer,
@ -69,6 +71,7 @@ func NewScheduleController(
}
c := &scheduleController{
namespace: namespace,
schedulesClient: schedulesClient,
backupsClient: backupsClient,
schedulesLister: schedulesInformer.Lister(),
@ -153,7 +156,7 @@ func (controller *scheduleController) Run(ctx context.Context, numWorkers int) e
}
func (controller *scheduleController) enqueueAllEnabledSchedules() {
schedules, err := controller.schedulesLister.Schedules(api.DefaultNamespace).List(labels.NewSelector())
schedules, err := controller.schedulesLister.Schedules(controller.namespace).List(labels.NewSelector())
if err != nil {
controller.logger.WithError(errors.WithStack(err)).Error("Error listing Schedules")
return
@ -388,7 +391,7 @@ func patchSchedule(original, updated *api.Schedule, client arkv1client.Schedules
return nil, errors.Wrap(err, "error creating two-way merge patch for schedule")
}
res, err := client.Schedules(api.DefaultNamespace).Patch(original.Name, types.MergePatchType, patchBytes)
res, err := client.Schedules(original.Namespace).Patch(original.Name, types.MergePatchType, patchBytes)
if err != nil {
return nil, errors.Wrap(err, "error patching schedule")
}


@ -124,6 +124,7 @@ func TestProcessSchedule(t *testing.T) {
)
c := NewScheduleController(
"namespace",
client.ArkV1(),
client.ArkV1(),
sharedInformers.Ark().V1().Schedules(),