fixed more stuff

pull/6787/head
Sharif Elgamal 2020-02-24 15:09:04 -08:00
parent f22efd871a
commit 9a3ecab61a
20 changed files with 120 additions and 156 deletions

View File

@@ -23,10 +23,9 @@ import (

 // nodeCmd represents the set of node subcommands
 var nodeCmd = &cobra.Command{
-	Use:    "node",
-	Short:  "Node operations",
-	Long:   "Operations on nodes",
-	Hidden: true, // This won't be fully functional and thus should not be documented yet
+	Use:   "node",
+	Short: "Node operations",
+	Long:  "Operations on nodes",
 	Run: func(cmd *cobra.Command, args []string) {
 		exit.UsageT("Usage: minikube node [add|start|stop|delete]")
 	},
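
Note: removing the Hidden field is what makes `minikube node` appear in help output at all. A minimal cobra sketch of what that field controls; the wiring below is illustrative, not minikube's actual command setup:

package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "minikube"}
	node := &cobra.Command{
		Use:   "node",
		Short: "Node operations",
		// With Hidden: true, "node" is omitted from `minikube help`;
		// dropping the field (as this commit does) makes it discoverable.
	}
	root.AddCommand(node)
	_ = root.Execute()
}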

View File

@@ -47,6 +47,10 @@ var nodeAddCmd = &cobra.Command{
 		if nodeName == "" {
 			name = profile + strconv.Itoa(len(mc.Nodes)+1)
 		}

+		_, _, err = node.Retrieve(mc, name)
+		if err == nil {
+			exit.WithCodeT(100, "{{.nodeName}} already exists in cluster {{.cluster}}. Choose a different name.", out.V{"nodeName": name, "cluster": mc.Name})
+		}
 		out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile})
 		err = node.Add(mc, name, cp, worker, "", profile)
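
The guard relies on node.Retrieve returning a nil error exactly when a node with that name already exists. A self-contained sketch of the same check-before-add pattern, using stand-in types rather than minikube's:

package main

import (
	"fmt"
	"strconv"
)

type node struct{ Name string }

// retrieve mimics node.Retrieve: the node and a nil error come back
// only when a node with the given name is already present.
func retrieve(nodes []node, name string) (*node, error) {
	for i := range nodes {
		if nodes[i].Name == name {
			return &nodes[i], nil
		}
	}
	return nil, fmt.Errorf("could not find node %s", name)
}

func main() {
	nodes := []node{{Name: "minikube"}, {Name: "minikube2"}}
	// Default name: profile + index of the new node, as in the diff.
	name := "minikube" + strconv.Itoa(len(nodes)+1)
	if _, err := retrieve(nodes, name); err == nil {
		fmt.Println(name, "already exists; choose a different name")
		return
	}
	fmt.Println("adding node", name)
}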

View File

@@ -46,7 +46,7 @@ var nodeDeleteCmd = &cobra.Command{
 		err = node.Delete(*cc, name)
 		if err != nil {
-			out.FatalT("Failed to delete node {{.name}}", out.V{"name": name})
+			exit.WithError("deleting node", err)
 		}
 		out.T(out.Deleted, "Node {{.name}} was successfully deleted.", out.V{"name": name})

View File

@@ -65,6 +65,7 @@ type Driver struct {
 	UUID        string
 	VpnKitSock  string
 	VSockPorts  []string
+	ClusterName string
 }

 // NewDriver creates a new driver for a host
@@ -199,7 +200,7 @@ func (d *Driver) Restart() error {
 }

 func (d *Driver) createHost() (*hyperkit.HyperKit, error) {
-	stateDir := filepath.Join(d.StorePath, "machines", d.MachineName)
+	stateDir := filepath.Join(d.StorePath, "machines", d.ClusterName, d.MachineName)
 	h, err := hyperkit.New("", d.VpnKitSock, stateDir)
 	if err != nil {
 		return nil, errors.Wrap(err, "new-ing Hyperkit")
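
Inserting d.ClusterName into the path namespaces each machine's state under a per-cluster directory, presumably so that state for several machines in one cluster cannot collide. A quick illustration of the layout change; the store path and names here are examples only:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	store, cluster, machine := "/home/user/.minikube", "minikube", "minikube2"

	before := filepath.Join(store, "machines", machine)
	after := filepath.Join(store, "machines", cluster, machine)

	fmt.Println("before:", before) // /home/user/.minikube/machines/minikube2
	fmt.Println("after: ", after)  // /home/user/.minikube/machines/minikube/minikube2
}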
@@ -519,6 +520,7 @@ func (d *Driver) sendSignal(s os.Signal) error {

 func (d *Driver) getPid() int {
 	pidPath := d.ResolveStorePath(machineFileName)
+	log.Debugf("PIDPATH=%s", pidPath)
 	f, err := os.Open(pidPath)
 	if err != nil {
 		log.Warnf("Error reading pid file: %v", err)

View File

@@ -52,6 +52,14 @@ const (
 var (
 	// ErrKeyNotFound is the error returned when a key doesn't exist in the config file
 	ErrKeyNotFound = errors.New("specified key could not be found in config")
+	// DockerEnv contains the environment variables
+	DockerEnv []string
+	// DockerOpt contains the option parameters
+	DockerOpt []string
+	// ExtraOptions contains extra options (if any)
+	ExtraOptions ExtraOptionSlice
+	// AddonList contains the list of addons
+	AddonList []string
 )

 // ErrNotExist is the error returned when a config does not exist

View File

@@ -1,47 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package config
-
-var (
-	// DockerEnv contains the environment variables
-	DockerEnv []string
-	// DockerOpt contains the option parameters
-	DockerOpt []string
-	// ExtraOptions contains extra options (if any)
-	ExtraOptions ExtraOptionSlice
-	// AddonList contains the list of addons
-	AddonList []string
-)
-
-// AddNode adds a new node config to an existing cluster.
-func AddNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion string, profileName string) error {
-	node := Node{
-		Name:   name,
-		Worker: true,
-	}
-
-	if controlPlane {
-		node.ControlPlane = true
-	}
-
-	if k8sVersion != "" {
-		node.KubernetesVersion = k8sVersion
-	}
-
-	cc.Nodes = append(cc.Nodes, node)
-	return SaveProfile(profileName, cc)
-}
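
The package variables deleted here are the same ones re-declared in the config file earlier in this diff, so they only moved. AddNode itself is small enough to restate: mark the node as a worker, set the optional fields, append, persist. A standalone sketch of that append-then-save flow, with the persistence step stubbed out since SaveProfile belongs to minikube:

package main

import "fmt"

type Node struct {
	Name              string
	Worker            bool
	ControlPlane      bool
	KubernetesVersion string
}

type ClusterConfig struct{ Nodes []Node }

// saveProfile stands in for config.SaveProfile, which writes the profile to disk.
func saveProfile(name string, cc *ClusterConfig) error {
	fmt.Printf("saving profile %q with %d node(s)\n", name, len(cc.Nodes))
	return nil
}

// addNode mirrors the deleted AddNode: every node is a worker; control-plane
// role and Kubernetes version are only set when requested.
func addNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion, profile string) error {
	n := Node{Name: name, Worker: true}
	if controlPlane {
		n.ControlPlane = true
	}
	if k8sVersion != "" {
		n.KubernetesVersion = k8sVersion
	}
	cc.Nodes = append(cc.Nodes, n)
	return saveProfile(profile, cc)
}

func main() {
	cc := &ClusterConfig{}
	_ = addNode(cc, "minikube2", false, "", "minikube")
}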

View File

@@ -137,29 +137,31 @@ func CacheAndLoadImages(images []string) error {
 		return errors.Wrap(err, "list profiles")
 	}
 	for _, p := range profiles { // loading images to all running profiles
-		pName := p.Name // capture the loop variable
-		status, err := GetHostStatus(api, pName)
-		if err != nil {
-			glog.Warningf("skipping loading cache for profile %s", pName)
-			glog.Errorf("error getting status for %s: %v", pName, err)
-			continue // try next machine
-		}
-		if status == state.Running.String() { // the not running hosts will load on next start
-			h, err := api.Load(pName)
-			if err != nil {
-				return err
-			}
-			cr, err := CommandRunner(h)
-			if err != nil {
-				return err
-			}
-			c, err := config.Load(pName)
-			if err != nil {
-				return err
-			}
-			err = LoadImages(c, cr, images, constants.ImageCacheDir)
-			if err != nil {
-				glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err)
-			}
-		}
+		for _, n := range p.Config.Nodes {
+			pName := n.Name // capture the loop variable
+			status, err := GetHostStatus(api, pName)
+			if err != nil {
+				glog.Warningf("skipping loading cache for profile %s", pName)
+				glog.Errorf("error getting status for %s: %v", pName, err)
+				continue // try next machine
+			}
+			if status == state.Running.String() { // the not running hosts will load on next start
+				h, err := api.Load(pName)
+				if err != nil {
+					return err
+				}
+				cr, err := CommandRunner(h)
+				if err != nil {
+					return err
+				}
+				c, err := config.Load(pName)
+				if err != nil {
+					return err
+				}
+				err = LoadImages(c, cr, images, constants.ImageCacheDir)
+				if err != nil {
+					glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err)
+				}
+			}
+		}
 	}
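
The reshaped loop is the substance of this hunk: each profile used to be treated as exactly one machine, and now every node in the profile's config is a candidate for image loading, with a bad machine skipped rather than aborting the whole run. A reduced sketch of the new iteration shape:

package main

import "fmt"

type Node struct{ Name string }

type Profile struct {
	Name  string
	Nodes []Node
}

func main() {
	profiles := []Profile{
		{Name: "minikube", Nodes: []Node{{Name: "minikube"}, {Name: "minikube2"}}},
		{Name: "other", Nodes: []Node{{Name: "other"}}},
	}
	for _, p := range profiles {
		// Previously one machine per profile (p.Name); now every node counts.
		for _, n := range p.Nodes {
			fmt.Println("would load cached images into", n.Name)
		}
	}
}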

View File

@@ -41,7 +41,7 @@ type MockDownloader struct{}
 func (d MockDownloader) GetISOFileURI(isoURL string) string          { return "" }
 func (d MockDownloader) CacheMinikubeISOFromURL(isoURL string) error { return nil }

-func createMockDriverHost(c config.ClusterConfig) (interface{}, error) {
+func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, error) {
 	return nil, nil
 }
@@ -67,28 +67,35 @@ var defaultClusterConfig = config.ClusterConfig{
 	DockerEnv: []string{"MOCK_MAKE_IT_PROVISION=true"},
 }

+var defaultNodeConfig = config.Node{
+	Name: viper.GetString("profile"),
+}
+
 func TestCreateHost(t *testing.T) {
 	RegisterMockDriver(t)
 	api := tests.NewMockAPI(t)
-	exists, _ := api.Exists(viper.GetString("profile"))
+	profile := viper.GetString("profile")
+	exists, _ := api.Exists(profile)
 	if exists {
 		t.Fatal("Machine already exists.")
 	}
-	_, err := createHost(api, defaultClusterConfig)
+
+	n := config.Node{Name: profile}
+	_, err := createHost(api, defaultClusterConfig, n)
 	if err != nil {
 		t.Fatalf("Error creating host: %v", err)
 	}
-	exists, err = api.Exists(viper.GetString("profile"))
+	exists, err = api.Exists(profile)
 	if err != nil {
-		t.Fatalf("exists failed for %q: %v", viper.GetString("profile"), err)
+		t.Fatalf("exists failed for %q: %v", profile, err)
 	}
 	if !exists {
-		t.Fatalf("%q does not exist, but should.", viper.GetString("profile"))
+		t.Fatalf("%q does not exist, but should.", profile)
 	}
-	h, err := api.Load(viper.GetString("profile"))
+	h, err := api.Load(profile)
 	if err != nil {
 		t.Fatalf("Error loading machine: %v", err)
 	}
@@ -113,8 +120,9 @@ func TestCreateHost(t *testing.T) {
 func TestStartHostExists(t *testing.T) {
 	RegisterMockDriver(t)
 	api := tests.NewMockAPI(t)
+
 	// Create an initial host.
-	ih, err := createHost(api, defaultClusterConfig)
+	ih, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
 	if err != nil {
 		t.Fatalf("Error creating host: %v", err)
 	}
@@ -131,9 +139,8 @@ func TestStartHostExists(t *testing.T) {
 	mc := defaultClusterConfig
 	mc.Name = ih.Name
-	n := config.Node{Name: ih.Name}

 	// This should pass without calling Create because the host exists already.
-	h, err := StartHost(api, mc, n)
+	h, err := StartHost(api, mc, defaultNodeConfig)
 	if err != nil {
 		t.Fatalf("Error starting host: %v", err)
 	}
@@ -153,7 +160,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) {
 	api := tests.NewMockAPI(t)
 	// Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel)
 	api.NotExistError = true
-	h, err := createHost(api, defaultClusterConfig)
+	h, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
 	if err != nil {
 		t.Fatalf("Error creating host: %v", err)
 	}
@@ -199,7 +206,7 @@ func TestStartStoppedHost(t *testing.T) {
 	RegisterMockDriver(t)
 	api := tests.NewMockAPI(t)
 	// Create an initial host.
-	h, err := createHost(api, defaultClusterConfig)
+	h, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
 	if err != nil {
 		t.Fatalf("Error creating host: %v", err)
 	}
@@ -311,7 +318,7 @@ func TestStopHostError(t *testing.T) {

 func TestStopHost(t *testing.T) {
 	RegisterMockDriver(t)
 	api := tests.NewMockAPI(t)
-	h, err := createHost(api, defaultClusterConfig)
+	h, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
 	if err != nil {
 		t.Errorf("createHost failed: %v", err)
 	}
@@ -327,7 +334,7 @@ func TestStopHost(t *testing.T) {

 func TestDeleteHost(t *testing.T) {
 	RegisterMockDriver(t)
 	api := tests.NewMockAPI(t)
-	if _, err := createHost(api, defaultClusterConfig); err != nil {
+	if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil {
 		t.Errorf("createHost failed: %v", err)
 	}
@@ -339,7 +346,7 @@ func TestDeleteHost(t *testing.T) {

 func TestDeleteHostErrorDeletingVM(t *testing.T) {
 	RegisterMockDriver(t)
 	api := tests.NewMockAPI(t)
-	h, err := createHost(api, defaultClusterConfig)
+	h, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
 	if err != nil {
 		t.Errorf("createHost failed: %v", err)
 	}
@@ -356,7 +363,7 @@ func TestDeleteHostErrorDeletingFiles(t *testing.T) {
 	RegisterMockDriver(t)
 	api := tests.NewMockAPI(t)
 	api.RemoveError = true
-	if _, err := createHost(api, defaultClusterConfig); err != nil {
+	if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil {
 		t.Errorf("createHost failed: %v", err)
 	}
@@ -370,7 +377,7 @@ func TestDeleteHostErrMachineNotExist(t *testing.T) {
 	api := tests.NewMockAPI(t)
 	// Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel)
 	api.NotExistError = true
-	_, err := createHost(api, defaultClusterConfig)
+	_, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
 	if err != nil {
 		t.Errorf("createHost failed: %v", err)
 	}
@@ -396,7 +403,7 @@ func TestGetHostStatus(t *testing.T) {
 	checkState(state.None.String())

-	if _, err := createHost(api, defaultClusterConfig); err != nil {
+	if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil {
 		t.Errorf("createHost failed: %v", err)
 	}

View File

@@ -88,7 +88,7 @@ func fixHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.Host, error) {
 	}

 	// recreate virtual machine
 	out.T(out.Meh, "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.", out.V{"name": mc.Name})
-	h, err = createHost(api, mc)
+	h, err = createHost(api, mc, n)
 	if err != nil {
 		return nil, errors.Wrap(err, "Error recreating VM")
 	}

View File

@@ -69,7 +69,7 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) {
 	}
 	start := time.Now()
 	defer func() {
-		glog.Infof("releasing machines lock for %q, held for %s", cfg.Name, time.Since(start))
+		glog.Infof("releasing machines lock for %q, held for %s", n.Name, time.Since(start))
 		releaser.Release()
 	}()
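
Logging n.Name instead of cfg.Name matters once several machines share one cluster config: the message now names the machine whose lock was held. The deferred closure it sits in is also doing real work, because time.Since(start) is evaluated at release time, not when the defer is registered. A tiny demonstration:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	defer func() {
		// Evaluated when the deferred func runs, so this reports the
		// real hold time, not zero.
		fmt.Printf("releasing machines lock for %q, held for %s\n", "minikube2", time.Since(start))
	}()
	time.Sleep(10 * time.Millisecond) // stand-in for the work done under the lock
}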
@@ -78,8 +78,8 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) {
 		return nil, errors.Wrapf(err, "exists: %s", n.Name)
 	}
 	if !exists {
-		glog.Infof("Provisioning new machine with config: %+v", n)
-		return createHost(api, cfg)
+		glog.Infof("Provisioning new machine with config: %+v %+v", cfg, n)
+		return createHost(api, cfg, n)
 	}
 	glog.Infoln("Skipping create...Using existing machine configuration")
 	return fixHost(api, cfg, n)
@@ -96,8 +96,8 @@ func engineOptions(cfg config.ClusterConfig) *engine.Options {
 	return &o
 }

-func createHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) {
-	glog.Infof("createHost starting for %q (driver=%q)", cfg.Name, cfg.Driver)
+func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) {
+	glog.Infof("createHost starting for %q (driver=%q)", n.Name, cfg.Driver)
 	start := time.Now()
 	defer func() {
 		glog.Infof("createHost completed in %s", time.Since(start))
@@ -114,7 +114,7 @@ func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) {
 	if def.Empty() {
 		return nil, fmt.Errorf("unsupported/missing driver: %s", cfg.Driver)
 	}
-	dd, err := def.Config(cfg)
+	dd, err := def.Config(cfg, n)
 	if err != nil {
 		return nil, errors.Wrap(err, "config")
 	}

View File

@@ -19,6 +19,7 @@ package node

 import (
 	"errors"

 	"github.com/golang/glog"
+	"github.com/spf13/viper"

 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/machine"
@@ -76,10 +77,9 @@ func Delete(cc config.ClusterConfig, name string) error {
 			return err
 		}

-		err = Stop(cc, nd)
-		if err != nil {
-			glog.Warningf("Failed to stop node %s. Will still try to delete.", name)
-		}
+		/*err = Stop(cc, nd)
+		if err != nil {
+			glog.Warningf("Failed to stop node %s. Will still try to delete.", name)
+		}*/
 	}

 	api, err := machine.NewAPIClient()
 	if err != nil {
@@ -105,20 +105,3 @@ func Retrieve(cc *config.ClusterConfig, name string) (*config.Node, int, error) {
 	return nil, -1, errors.New("Could not find node " + name)
 }
-
-// Save saves a node to a cluster
-func Save(cfg *config.ClusterConfig, node *config.Node) error {
-	update := false
-	for i, n := range cfg.Nodes {
-		if n.Name == node.Name {
-			cfg.Nodes[i] = *node
-			update = true
-			break
-		}
-	}
-	if !update {
-		cfg.Nodes = append(cfg.Nodes, *node)
-	}
-
-	return config.SaveProfile(viper.GetString(config.MachineProfile), cfg)
-}

View File

@@ -38,6 +38,11 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool) {
 	var cacheGroup errgroup.Group
 	beginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion)

+	// Why do we need this?
+	if cc.Downloader == nil {
+		cc.Downloader = util.DefaultDownloader{}
+	}
+
 	runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n)
 	defer mAPI.Close()
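
The hunk's own comment asks why the guard is needed. One plausible reason (an assumption here, not something this diff states) is that an interface-typed field like Downloader comes back nil when a ClusterConfig is reloaded from disk, so it must be re-defaulted before anything calls through it. The nil-guard pattern in isolation:

package main

import "fmt"

// Downloader is a stand-in for minikube's downloader interface.
type Downloader interface {
	GetISOFileURI(isoURL string) string
}

type defaultDownloader struct{}

func (defaultDownloader) GetISOFileURI(isoURL string) string { return isoURL }

type ClusterConfig struct {
	Downloader Downloader
}

func main() {
	var cc ClusterConfig // e.g. freshly unmarshalled: Downloader is nil
	if cc.Downloader == nil {
		cc.Downloader = defaultDownloader{} // default before anyone calls through it
	}
	fmt.Println(cc.Downloader.GetISOFileURI("https://example.com/minikube.iso"))
}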

View File

@@ -43,15 +43,15 @@ func init() {
 	}
 }

-func configure(mc config.ClusterConfig) (interface{}, error) {
+func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
 	return kic.NewDriver(kic.Config{
-		MachineName:   mc.Name,
+		MachineName:   n.Name,
 		StorePath:     localpath.MiniPath(),
 		ImageDigest:   kic.BaseImage,
 		CPU:           mc.CPUs,
 		Memory:        mc.Memory,
 		OCIBinary:     oci.Docker,
-		APIServerPort: mc.Nodes[0].Port,
+		APIServerPort: n.Port,
 	}), nil
 }

View File

@@ -31,7 +31,7 @@ import (
 	"github.com/pborman/uuid"

 	"k8s.io/minikube/pkg/drivers/hyperkit"
-	cfg "k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/localpath"
 	"k8s.io/minikube/pkg/minikube/registry"
@@ -57,28 +57,29 @@ func init() {
 	}
 }

-func configure(config cfg.ClusterConfig) (interface{}, error) {
-	u := config.UUID
+func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
+	u := cfg.UUID
 	if u == "" {
 		u = uuid.NewUUID().String()
 	}

 	return &hyperkit.Driver{
 		BaseDriver: &drivers.BaseDriver{
-			MachineName: config.Name,
+			MachineName: cfg.Name,
 			StorePath:   localpath.MiniPath(),
 			SSHUser:     "docker",
 		},
-		Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO),
-		DiskSize:       config.DiskSize,
-		Memory:         config.Memory,
-		CPU:            config.CPUs,
-		NFSShares:      config.NFSShare,
-		NFSSharesRoot:  config.NFSSharesRoot,
+		ClusterName:    cfg.Name,
+		Boot2DockerURL: cfg.Downloader.GetISOFileURI(cfg.MinikubeISO),
+		DiskSize:       cfg.DiskSize,
+		Memory:         cfg.Memory,
+		CPU:            cfg.CPUs,
+		NFSShares:      cfg.NFSShare,
+		NFSSharesRoot:  cfg.NFSSharesRoot,
 		UUID:           u,
-		VpnKitSock:     config.HyperkitVpnKitSock,
-		VSockPorts:     config.HyperkitVSockPorts,
-		Cmdline:        "loglevel=3 console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes random.trust_cpu=on hw_rng_model=virtio base host=" + config.Name,
+		VpnKitSock:     cfg.HyperkitVpnKitSock,
+		VSockPorts:     cfg.HyperkitVSockPorts,
+		Cmdline:        "loglevel=3 console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes random.trust_cpu=on hw_rng_model=virtio base host=" + n.Name,
 	}, nil
 }

View File

@@ -24,7 +24,7 @@ import (
 	parallels "github.com/Parallels/docker-machine-parallels"
 	"github.com/docker/machine/libmachine/drivers"
-	cfg "k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/localpath"
 	"k8s.io/minikube/pkg/minikube/registry"
@@ -44,12 +44,12 @@ func init() {
 	}
 }

-func configure(config cfg.ClusterConfig) (interface{}, error) {
-	d := parallels.NewDriver(config.Name, localpath.MiniPath()).(*parallels.Driver)
-	d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO)
-	d.Memory = config.Memory
-	d.CPU = config.CPUs
-	d.DiskSize = config.DiskSize
+func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
+	d := parallels.NewDriver(n.Name, localpath.MiniPath()).(*parallels.Driver)
+	d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO)
+	d.Memory = cfg.Memory
+	d.CPU = cfg.CPUs
+	d.DiskSize = cfg.DiskSize
 	return d, nil
 }

View File

@@ -49,15 +49,15 @@ func init() {
 	}
 }

-func configure(mc config.ClusterConfig) (interface{}, error) {
+func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
 	return kic.NewDriver(kic.Config{
-		MachineName:   mc.Name,
+		MachineName:   n.Name,
 		StorePath:     localpath.MiniPath(),
 		ImageDigest:   strings.Split(kic.BaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest.
 		CPU:           mc.CPUs,
 		Memory:        mc.Memory,
 		OCIBinary:     oci.Podman,
-		APIServerPort: mc.Nodes[0].Port,
+		APIServerPort: n.Port,
 	}), nil
 }

View File

@@ -49,8 +49,8 @@ func init() {
 	}
 }

-func configure(mc config.ClusterConfig) (interface{}, error) {
-	d := virtualbox.NewDriver(mc.Name, localpath.MiniPath())
+func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
+	d := virtualbox.NewDriver(n.Name, localpath.MiniPath())
 	d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO)
 	d.Memory = mc.Memory
 	d.CPU = mc.CPUs
View File

@@ -39,8 +39,8 @@ func init() {
 	}
 }

-func configure(mc config.ClusterConfig) (interface{}, error) {
-	d := vmwcfg.NewConfig(mc.Name, localpath.MiniPath())
+func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
+	d := vmwcfg.NewConfig(n.Name, localpath.MiniPath())
 	d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO)
 	d.Memory = mc.Memory
 	d.CPU = mc.CPUs

View File

@@ -26,7 +26,7 @@ import (
 	"github.com/docker/machine/libmachine/drivers"
 	"github.com/pkg/errors"

-	cfg "k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/localpath"
 	"k8s.io/minikube/pkg/minikube/registry"
@@ -44,12 +44,12 @@ func init() {
 	}
 }

-func configure(config cfg.ClusterConfig) (interface{}, error) {
-	d := vmwarefusion.NewDriver(config.Name, localpath.MiniPath()).(*vmwarefusion.Driver)
-	d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO)
-	d.Memory = config.Memory
-	d.CPU = config.CPUs
-	d.DiskSize = config.DiskSize
+func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
+	d := vmwarefusion.NewDriver(n.Name, localpath.MiniPath()).(*vmwarefusion.Driver)
+	d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO)
+	d.Memory = cfg.Memory
+	d.CPU = cfg.CPUs
+	d.DiskSize = cfg.DiskSize

 	// TODO(philips): push these defaults upstream to fixup this driver
 	d.SSHPort = 22

View File

@@ -60,7 +60,7 @@ type Registry interface {
 }

 // Configurator emits a struct to be marshalled into JSON for Machine Driver
-type Configurator func(config.ClusterConfig) (interface{}, error)
+type Configurator func(config.ClusterConfig, config.Node) (interface{}, error)

 // Loader is a function that loads a byte stream and creates a driver.
 type Loader func() drivers.Driver
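
This one-line signature change is the contract behind every per-driver configure edit above: a Configurator now receives the node alongside the cluster config, so drivers can name machines per node instead of per cluster. A compact sketch of how a registry keyed on driver name dispatches with the new shape; the types here are simplified stand-ins for minikube's:

package main

import "fmt"

type ClusterConfig struct{ Name, Driver string }
type Node struct{ Name string }

// Configurator mirrors the new two-argument signature.
type Configurator func(ClusterConfig, Node) (interface{}, error)

var configurators = map[string]Configurator{
	"virtualbox": func(cc ClusterConfig, n Node) (interface{}, error) {
		// The machine is named after the node, not the cluster.
		return fmt.Sprintf("driver config for machine %q in cluster %q", n.Name, cc.Name), nil
	},
}

func main() {
	cc := ClusterConfig{Name: "minikube", Driver: "virtualbox"}
	n := Node{Name: "minikube2"}
	dd, err := configurators[cc.Driver](cc, n)
	if err != nil {
		panic(err)
	}
	fmt.Println(dd)
}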