fixed more stuff

parent f22efd871a
commit 9a3ecab61a
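Judging from the hunks below, this commit threads a per-node config.Node through machine creation: driver Configurators and createHost now take a Node, machine names and API-server ports come from the node rather than cfg.Name or cfg.Nodes[0], the hyperkit driver gains a ClusterName and nests its state directory under it, image cache loading iterates every node of every profile, the node subcommands are unhidden, and `minikube node add` rejects duplicate node names.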
@@ -23,10 +23,9 @@ import (
 // nodeCmd represents the set of node subcommands
 var nodeCmd = &cobra.Command{
     Use:    "node",
     Short:  "Node operations",
     Long:   "Operations on nodes",
-    Hidden: true, // This won't be fully functional and thus should not be documented yet
     Run: func(cmd *cobra.Command, args []string) {
         exit.UsageT("Usage: minikube node [add|start|stop|delete]")
     },
 
@@ -47,6 +47,10 @@ var nodeAddCmd = &cobra.Command{
         if nodeName == "" {
             name = profile + strconv.Itoa(len(mc.Nodes)+1)
         }
+        _, _, err = node.Retrieve(mc, name)
+        if err == nil {
+            exit.WithCodeT(100, "{{.nodeName}} already exists in cluster {{.cluster}}. Choose a different name.", out.V{"nodeName": name, "cluster": mc.Name})
+        }
         out.T(out.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": profile})
 
         err = node.Add(mc, name, cp, worker, "", profile)
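The auto-naming above derives a node name from the profile plus a 1-based index over the existing nodes, and the new Retrieve guard refuses a name that is already taken. A standalone sketch of the naming scheme (names here are illustrative, not minikube's):

package main

import (
    "fmt"
    "strconv"
)

// nextNodeName mirrors the scheme above: <profile><len(nodes)+1>,
// so a profile "minikube" with one existing node yields "minikube2".
func nextNodeName(profile string, existingNodes int) string {
    return profile + strconv.Itoa(existingNodes+1)
}

func main() {
    fmt.Println(nextNodeName("minikube", 1)) // minikube2
}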
@@ -46,7 +46,7 @@ var nodeDeleteCmd = &cobra.Command{
 
         err = node.Delete(*cc, name)
         if err != nil {
-            out.FatalT("Failed to delete node {{.name}}", out.V{"name": name})
+            exit.WithError("deleting node", err)
         }
 
         out.T(out.Deleted, "Node {{.name}} was successfully deleted.", out.V{"name": name})
@@ -65,6 +65,7 @@ type Driver struct {
     UUID        string
     VpnKitSock  string
    VSockPorts  []string
+    ClusterName string
 }
 
 // NewDriver creates a new driver for a host
@@ -199,7 +200,7 @@ func (d *Driver) Restart() error {
 }
 
 func (d *Driver) createHost() (*hyperkit.HyperKit, error) {
-    stateDir := filepath.Join(d.StorePath, "machines", d.MachineName)
+    stateDir := filepath.Join(d.StorePath, "machines", d.ClusterName, d.MachineName)
     h, err := hyperkit.New("", d.VpnKitSock, stateDir)
     if err != nil {
         return nil, errors.Wrap(err, "new-ing Hyperkit")
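One way to see the effect of the stateDir change above: machine state moves from machines/<machine> to machines/<cluster>/<machine> under the store path, so nodes of one cluster are grouped together. The concrete paths below are illustrative only.

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    store, cluster, machine := "/Users/me/.minikube", "minikube", "minikube2"
    // before: state for every machine sat directly under machines/
    fmt.Println(filepath.Join(store, "machines", machine))
    // after: it is namespaced by the owning cluster
    fmt.Println(filepath.Join(store, "machines", cluster, machine))
}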
@@ -519,6 +520,7 @@ func (d *Driver) sendSignal(s os.Signal) error {
 func (d *Driver) getPid() int {
     pidPath := d.ResolveStorePath(machineFileName)
 
+    log.Debugf("PIDPATH=%s", pidPath)
     f, err := os.Open(pidPath)
     if err != nil {
         log.Warnf("Error reading pid file: %v", err)
@@ -52,6 +52,14 @@ const (
 var (
     // ErrKeyNotFound is the error returned when a key doesn't exist in the config file
     ErrKeyNotFound = errors.New("specified key could not be found in config")
+    // DockerEnv contains the environment variables
+    DockerEnv []string
+    // DockerOpt contains the option parameters
+    DockerOpt []string
+    // ExtraOptions contains extra options (if any)
+    ExtraOptions ExtraOptionSlice
+    // AddonList contains the list of addons
+    AddonList []string
 )
 
 // ErrNotExist is the error returned when a config does not exist
@@ -1,47 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package config
-
-var (
-    // DockerEnv contains the environment variables
-    DockerEnv []string
-    // DockerOpt contains the option parameters
-    DockerOpt []string
-    // ExtraOptions contains extra options (if any)
-    ExtraOptions ExtraOptionSlice
-    // AddonList contains the list of addons
-    AddonList []string
-)
-
-// AddNode adds a new node config to an existing cluster.
-func AddNode(cc *ClusterConfig, name string, controlPlane bool, k8sVersion string, profileName string) error {
-    node := Node{
-        Name:   name,
-        Worker: true,
-    }
-
-    if controlPlane {
-        node.ControlPlane = true
-    }
-
-    if k8sVersion != "" {
-        node.KubernetesVersion = k8sVersion
-    }
-
-    cc.Nodes = append(cc.Nodes, node)
-    return SaveProfile(profileName, cc)
-}
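Note that the four exported vars removed with this file are the same ones added to the shared var block in the previous hunk; within the hunks shown here, the file's AddNode helper has no replacement.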
@@ -137,29 +137,31 @@ func CacheAndLoadImages(images []string) error {
         return errors.Wrap(err, "list profiles")
     }
     for _, p := range profiles { // loading images to all running profiles
-        pName := p.Name // capture the loop variable
-        status, err := GetHostStatus(api, pName)
-        if err != nil {
-            glog.Warningf("skipping loading cache for profile %s", pName)
-            glog.Errorf("error getting status for %s: %v", pName, err)
-            continue // try next machine
-        }
-        if status == state.Running.String() { // the not running hosts will load on next start
-            h, err := api.Load(pName)
-            if err != nil {
-                return err
-            }
-            cr, err := CommandRunner(h)
-            if err != nil {
-                return err
-            }
-            c, err := config.Load(pName)
-            if err != nil {
-                return err
-            }
-            err = LoadImages(c, cr, images, constants.ImageCacheDir)
-            if err != nil {
-                glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err)
+        for _, n := range p.Config.Nodes {
+            pName := n.Name // capture the loop variable
+            status, err := GetHostStatus(api, pName)
+            if err != nil {
+                glog.Warningf("skipping loading cache for profile %s", pName)
+                glog.Errorf("error getting status for %s: %v", pName, err)
+                continue // try next machine
+            }
+            if status == state.Running.String() { // the not running hosts will load on next start
+                h, err := api.Load(pName)
+                if err != nil {
+                    return err
+                }
+                cr, err := CommandRunner(h)
+                if err != nil {
+                    return err
+                }
+                c, err := config.Load(pName)
+                if err != nil {
+                    return err
+                }
+                err = LoadImages(c, cr, images, constants.ImageCacheDir)
+                if err != nil {
+                    glog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err)
+                }
             }
         }
     }
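The rewritten loop's shape, reduced to a standalone sketch: visit every node of every profile, and tolerate per-node failures with a warning instead of aborting the whole cache load. Types and statuses here are stand-ins, not minikube's.

package main

import "log"

type node struct{ Name string }
type profile struct{ Nodes []node }

// loadAll visits every node of every profile; a failed status check only
// skips that node, mirroring the warn-and-continue flow above.
func loadAll(profiles []profile, status func(name string) (string, error)) {
    for _, p := range profiles {
        for _, n := range p.Nodes {
            s, err := status(n.Name)
            if err != nil {
                log.Printf("skipping %s: %v", n.Name, err)
                continue
            }
            if s == "Running" {
                log.Printf("would load cached images into %s", n.Name)
            }
        }
    }
}

func main() {
    ps := []profile{{Nodes: []node{{Name: "minikube"}, {Name: "minikube2"}}}}
    loadAll(ps, func(string) (string, error) { return "Running", nil })
}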
@@ -41,7 +41,7 @@ type MockDownloader struct{}
 func (d MockDownloader) GetISOFileURI(isoURL string) string          { return "" }
 func (d MockDownloader) CacheMinikubeISOFromURL(isoURL string) error { return nil }
 
-func createMockDriverHost(c config.ClusterConfig) (interface{}, error) {
+func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, error) {
     return nil, nil
 }
 
@@ -67,28 +67,35 @@ var defaultClusterConfig = config.ClusterConfig{
     DockerEnv: []string{"MOCK_MAKE_IT_PROVISION=true"},
 }
 
+var defaultNodeConfig = config.Node{
+    Name: viper.GetString("profile"),
+}
+
 func TestCreateHost(t *testing.T) {
     RegisterMockDriver(t)
     api := tests.NewMockAPI(t)
 
-    exists, _ := api.Exists(viper.GetString("profile"))
+    profile := viper.GetString("profile")
+    exists, _ := api.Exists(profile)
     if exists {
         t.Fatal("Machine already exists.")
     }
 
-    _, err := createHost(api, defaultClusterConfig)
+    n := config.Node{Name: profile}
+
+    _, err := createHost(api, defaultClusterConfig, n)
     if err != nil {
         t.Fatalf("Error creating host: %v", err)
     }
-    exists, err = api.Exists(viper.GetString("profile"))
+    exists, err = api.Exists(profile)
     if err != nil {
-        t.Fatalf("exists failed for %q: %v", viper.GetString("profile"), err)
+        t.Fatalf("exists failed for %q: %v", profile, err)
     }
     if !exists {
-        t.Fatalf("%q does not exist, but should.", viper.GetString("profile"))
+        t.Fatalf("%q does not exist, but should.", profile)
     }
 
-    h, err := api.Load(viper.GetString("profile"))
+    h, err := api.Load(profile)
     if err != nil {
         t.Fatalf("Error loading machine: %v", err)
     }
@@ -113,8 +120,9 @@ func TestCreateHost(t *testing.T) {
 func TestStartHostExists(t *testing.T) {
     RegisterMockDriver(t)
     api := tests.NewMockAPI(t)
+
     // Create an initial host.
-    ih, err := createHost(api, defaultClusterConfig)
+    ih, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
     if err != nil {
         t.Fatalf("Error creating host: %v", err)
     }
@@ -131,9 +139,8 @@ func TestStartHostExists(t *testing.T) {
     mc := defaultClusterConfig
     mc.Name = ih.Name
 
-    n := config.Node{Name: ih.Name}
     // This should pass without calling Create because the host exists already.
-    h, err := StartHost(api, mc, n)
+    h, err := StartHost(api, mc, defaultNodeConfig)
     if err != nil {
         t.Fatalf("Error starting host: %v", err)
     }
@@ -153,7 +160,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) {
     api := tests.NewMockAPI(t)
     // Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel)
     api.NotExistError = true
-    h, err := createHost(api, defaultClusterConfig)
+    h, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
     if err != nil {
         t.Fatalf("Error creating host: %v", err)
     }
@@ -199,7 +206,7 @@ func TestStartStoppedHost(t *testing.T) {
     RegisterMockDriver(t)
     api := tests.NewMockAPI(t)
     // Create an initial host.
-    h, err := createHost(api, defaultClusterConfig)
+    h, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
     if err != nil {
         t.Fatalf("Error creating host: %v", err)
     }
@@ -311,7 +318,7 @@ func TestStopHostError(t *testing.T) {
 func TestStopHost(t *testing.T) {
     RegisterMockDriver(t)
     api := tests.NewMockAPI(t)
-    h, err := createHost(api, defaultClusterConfig)
+    h, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
     if err != nil {
         t.Errorf("createHost failed: %v", err)
     }
@@ -327,7 +334,7 @@ func TestStopHost(t *testing.T) {
 func TestDeleteHost(t *testing.T) {
     RegisterMockDriver(t)
     api := tests.NewMockAPI(t)
-    if _, err := createHost(api, defaultClusterConfig); err != nil {
+    if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil {
         t.Errorf("createHost failed: %v", err)
     }
 
@@ -339,7 +346,7 @@ func TestDeleteHost(t *testing.T) {
 func TestDeleteHostErrorDeletingVM(t *testing.T) {
     RegisterMockDriver(t)
     api := tests.NewMockAPI(t)
-    h, err := createHost(api, defaultClusterConfig)
+    h, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
     if err != nil {
         t.Errorf("createHost failed: %v", err)
     }
@@ -356,7 +363,7 @@ func TestDeleteHostErrorDeletingFiles(t *testing.T) {
     RegisterMockDriver(t)
     api := tests.NewMockAPI(t)
     api.RemoveError = true
-    if _, err := createHost(api, defaultClusterConfig); err != nil {
+    if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil {
         t.Errorf("createHost failed: %v", err)
     }
 
@@ -370,7 +377,7 @@ func TestDeleteHostErrMachineNotExist(t *testing.T) {
     api := tests.NewMockAPI(t)
     // Create an incomplete host with machine does not exist error(i.e. User Interrupt Cancel)
     api.NotExistError = true
-    _, err := createHost(api, defaultClusterConfig)
+    _, err := createHost(api, defaultClusterConfig, defaultNodeConfig)
     if err != nil {
         t.Errorf("createHost failed: %v", err)
     }
@@ -396,7 +403,7 @@ func TestGetHostStatus(t *testing.T) {
 
     checkState(state.None.String())
 
-    if _, err := createHost(api, defaultClusterConfig); err != nil {
+    if _, err := createHost(api, defaultClusterConfig, defaultNodeConfig); err != nil {
         t.Errorf("createHost failed: %v", err)
     }
 
@@ -88,7 +88,7 @@ func fixHost(api libmachine.API, mc config.ClusterConfig, n config.Node) (*host.
     }
     // recreate virtual machine
     out.T(out.Meh, "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.", out.V{"name": mc.Name})
-    h, err = createHost(api, mc)
+    h, err = createHost(api, mc, n)
     if err != nil {
         return nil, errors.Wrap(err, "Error recreating VM")
     }
@@ -69,7 +69,7 @@ func StartHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*ho
     }
     start := time.Now()
     defer func() {
-        glog.Infof("releasing machines lock for %q, held for %s", cfg.Name, time.Since(start))
+        glog.Infof("releasing machines lock for %q, held for %s", n.Name, time.Since(start))
         releaser.Release()
     }()
 
@@ -78,8 +78,8 @@
         return nil, errors.Wrapf(err, "exists: %s", n.Name)
     }
     if !exists {
-        glog.Infof("Provisioning new machine with config: %+v", n)
-        return createHost(api, cfg)
+        glog.Infof("Provisioning new machine with config: %+v %+v", cfg, n)
+        return createHost(api, cfg, n)
     }
     glog.Infoln("Skipping create...Using existing machine configuration")
     return fixHost(api, cfg, n)
@@ -96,8 +96,8 @@ func engineOptions(cfg config.ClusterConfig) *engine.Options {
     return &o
 }
 
-func createHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error) {
-    glog.Infof("createHost starting for %q (driver=%q)", cfg.Name, cfg.Driver)
+func createHost(api libmachine.API, cfg config.ClusterConfig, n config.Node) (*host.Host, error) {
+    glog.Infof("createHost starting for %q (driver=%q)", n.Name, cfg.Driver)
     start := time.Now()
     defer func() {
         glog.Infof("createHost completed in %s", time.Since(start))
@@ -114,7 +114,7 @@ func createHost(api libmachine.API, cfg config.ClusterConfig) (*host.Host, error
     if def.Empty() {
         return nil, fmt.Errorf("unsupported/missing driver: %s", cfg.Driver)
     }
-    dd, err := def.Config(cfg)
+    dd, err := def.Config(cfg, n)
     if err != nil {
         return nil, errors.Wrap(err, "config")
     }
@@ -19,6 +19,7 @@ package node
 import (
     "errors"
 
+    "github.com/golang/glog"
     "github.com/spf13/viper"
     "k8s.io/minikube/pkg/minikube/config"
     "k8s.io/minikube/pkg/minikube/machine"
@@ -76,10 +77,9 @@ func Delete(cc config.ClusterConfig, name string) error {
         return err
     }
 
-    /*err = Stop(cc, nd)
     if err != nil {
         glog.Warningf("Failed to stop node %s. Will still try to delete.", name)
-    }*/
+    }
 
     api, err := machine.NewAPIClient()
     if err != nil {
@@ -105,20 +105,3 @@ func Retrieve(cc *config.ClusterConfig, name string) (*config.Node, int, error)
 
     return nil, -1, errors.New("Could not find node " + name)
 }
-
-// Save saves a node to a cluster
-func Save(cfg *config.ClusterConfig, node *config.Node) error {
-    update := false
-    for i, n := range cfg.Nodes {
-        if n.Name == node.Name {
-            cfg.Nodes[i] = *node
-            update = true
-            break
-        }
-    }
-
-    if !update {
-        cfg.Nodes = append(cfg.Nodes, *node)
-    }
-    return config.SaveProfile(viper.GetString(config.MachineProfile), cfg)
-}
@@ -38,6 +38,11 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
     var cacheGroup errgroup.Group
     beginCacheRequiredImages(&cacheGroup, cc.KubernetesConfig.ImageRepository, n.KubernetesVersion)
 
+    // Why do we need this?
+    if cc.Downloader == nil {
+        cc.Downloader = util.DefaultDownloader{}
+    }
+
     runner, preExists, mAPI, _ := cluster.StartMachine(&cc, &n)
     defer mAPI.Close()
 
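The Downloader guard above (flagged by its own "Why do we need this?" comment) is the usual Go pattern of defaulting a nil interface field right before use, so zero-value configs still work. A standalone sketch with stand-in types, not minikube's:

package main

import "fmt"

type downloader interface {
    GetISOFileURI(isoURL string) string
}

type defaultDownloader struct{}

func (defaultDownloader) GetISOFileURI(isoURL string) string { return "file://" + isoURL }

type clusterConfig struct {
    Downloader downloader
}

func main() {
    var cc clusterConfig // zero value: Downloader is nil
    if cc.Downloader == nil {
        cc.Downloader = defaultDownloader{} // default applied lazily, just before use
    }
    fmt.Println(cc.Downloader.GetISOFileURI("minikube.iso"))
}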
@@ -43,15 +43,15 @@ func init() {
     }
 }
 
-func configure(mc config.ClusterConfig) (interface{}, error) {
+func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
     return kic.NewDriver(kic.Config{
-        MachineName:   mc.Name,
+        MachineName:   n.Name,
         StorePath:     localpath.MiniPath(),
         ImageDigest:   kic.BaseImage,
         CPU:           mc.CPUs,
         Memory:        mc.Memory,
         OCIBinary:     oci.Docker,
-        APIServerPort: mc.Nodes[0].Port,
+        APIServerPort: n.Port,
     }), nil
 }
 
@@ -31,7 +31,7 @@ import (
     "github.com/pborman/uuid"
 
     "k8s.io/minikube/pkg/drivers/hyperkit"
-    cfg "k8s.io/minikube/pkg/minikube/config"
+    "k8s.io/minikube/pkg/minikube/config"
     "k8s.io/minikube/pkg/minikube/driver"
     "k8s.io/minikube/pkg/minikube/localpath"
     "k8s.io/minikube/pkg/minikube/registry"
@@ -57,28 +57,29 @@ func init() {
     }
 }
 
-func configure(config cfg.ClusterConfig) (interface{}, error) {
-    u := config.UUID
+func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
+    u := cfg.UUID
     if u == "" {
         u = uuid.NewUUID().String()
     }
 
     return &hyperkit.Driver{
         BaseDriver: &drivers.BaseDriver{
-            MachineName: config.Name,
+            MachineName: cfg.Name,
             StorePath:   localpath.MiniPath(),
             SSHUser:     "docker",
         },
-        Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO),
-        DiskSize:       config.DiskSize,
-        Memory:         config.Memory,
-        CPU:            config.CPUs,
-        NFSShares:      config.NFSShare,
-        NFSSharesRoot:  config.NFSSharesRoot,
+        ClusterName:    cfg.Name,
+        Boot2DockerURL: cfg.Downloader.GetISOFileURI(cfg.MinikubeISO),
+        DiskSize:       cfg.DiskSize,
+        Memory:         cfg.Memory,
+        CPU:            cfg.CPUs,
+        NFSShares:      cfg.NFSShare,
+        NFSSharesRoot:  cfg.NFSSharesRoot,
         UUID:           u,
-        VpnKitSock:     config.HyperkitVpnKitSock,
-        VSockPorts:     config.HyperkitVSockPorts,
-        Cmdline:        "loglevel=3 console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes random.trust_cpu=on hw_rng_model=virtio base host=" + config.Name,
+        VpnKitSock:     cfg.HyperkitVpnKitSock,
+        VSockPorts:     cfg.HyperkitVSockPorts,
+        Cmdline:        "loglevel=3 console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes random.trust_cpu=on hw_rng_model=virtio base host=" + n.Name,
     }, nil
 }
 
@@ -24,7 +24,7 @@ import (
 
     parallels "github.com/Parallels/docker-machine-parallels"
     "github.com/docker/machine/libmachine/drivers"
-    cfg "k8s.io/minikube/pkg/minikube/config"
+    "k8s.io/minikube/pkg/minikube/config"
     "k8s.io/minikube/pkg/minikube/driver"
     "k8s.io/minikube/pkg/minikube/localpath"
     "k8s.io/minikube/pkg/minikube/registry"
@@ -44,12 +44,12 @@ func init() {
 
 }
 
-func configure(config cfg.ClusterConfig) (interface{}, error) {
-    d := parallels.NewDriver(config.Name, localpath.MiniPath()).(*parallels.Driver)
-    d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO)
-    d.Memory = config.Memory
-    d.CPU = config.CPUs
-    d.DiskSize = config.DiskSize
+func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
+    d := parallels.NewDriver(n.Name, localpath.MiniPath()).(*parallels.Driver)
+    d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO)
+    d.Memory = cfg.Memory
+    d.CPU = cfg.CPUs
+    d.DiskSize = cfg.DiskSize
     return d, nil
 }
 
@@ -49,15 +49,15 @@ func init() {
     }
 }
 
-func configure(mc config.ClusterConfig) (interface{}, error) {
+func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
     return kic.NewDriver(kic.Config{
-        MachineName:   mc.Name,
+        MachineName:   n.Name,
         StorePath:     localpath.MiniPath(),
         ImageDigest:   strings.Split(kic.BaseImage, "@")[0], // for podman does not support docker images references with both a tag and digest.
         CPU:           mc.CPUs,
         Memory:        mc.Memory,
         OCIBinary:     oci.Podman,
-        APIServerPort: mc.Nodes[0].Port,
+        APIServerPort: n.Port,
     }), nil
 }
 
@@ -49,8 +49,8 @@ func init() {
     }
 }
 
-func configure(mc config.ClusterConfig) (interface{}, error) {
-    d := virtualbox.NewDriver(mc.Name, localpath.MiniPath())
+func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
+    d := virtualbox.NewDriver(n.Name, localpath.MiniPath())
     d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO)
     d.Memory = mc.Memory
     d.CPU = mc.CPUs
@@ -39,8 +39,8 @@ func init() {
     }
 }
 
-func configure(mc config.ClusterConfig) (interface{}, error) {
-    d := vmwcfg.NewConfig(mc.Name, localpath.MiniPath())
+func configure(mc config.ClusterConfig, n config.Node) (interface{}, error) {
+    d := vmwcfg.NewConfig(n.Name, localpath.MiniPath())
     d.Boot2DockerURL = mc.Downloader.GetISOFileURI(mc.MinikubeISO)
     d.Memory = mc.Memory
     d.CPU = mc.CPUs
@@ -26,7 +26,7 @@ import (
     "github.com/docker/machine/libmachine/drivers"
     "github.com/pkg/errors"
 
-    cfg "k8s.io/minikube/pkg/minikube/config"
+    "k8s.io/minikube/pkg/minikube/config"
     "k8s.io/minikube/pkg/minikube/driver"
     "k8s.io/minikube/pkg/minikube/localpath"
     "k8s.io/minikube/pkg/minikube/registry"
@@ -44,12 +44,12 @@ func init() {
     }
 }
 
-func configure(config cfg.ClusterConfig) (interface{}, error) {
-    d := vmwarefusion.NewDriver(config.Name, localpath.MiniPath()).(*vmwarefusion.Driver)
-    d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO)
-    d.Memory = config.Memory
-    d.CPU = config.CPUs
-    d.DiskSize = config.DiskSize
+func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) {
+    d := vmwarefusion.NewDriver(n.Name, localpath.MiniPath()).(*vmwarefusion.Driver)
+    d.Boot2DockerURL = cfg.Downloader.GetISOFileURI(cfg.MinikubeISO)
+    d.Memory = cfg.Memory
+    d.CPU = cfg.CPUs
+    d.DiskSize = cfg.DiskSize
 
     // TODO(philips): push these defaults upstream to fixup this driver
     d.SSHPort = 22
@@ -60,7 +60,7 @@ type Registry interface {
 }
 
 // Configurator emits a struct to be marshalled into JSON for Machine Driver
-type Configurator func(config.ClusterConfig) (interface{}, error)
+type Configurator func(config.ClusterConfig, config.Node) (interface{}, error)
 
 // Loader is a function that loads a byte stream and creates a driver.
 type Loader func() drivers.Driver
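The Configurator signature change above is the pivot for the rest of this commit: every driver's configure function now receives the node next to the cluster config, so per-node values stop coming from cfg.Name or cfg.Nodes[0]. A self-contained sketch of the pattern with stand-in types, not minikube's registry API:

package main

import "fmt"

type ClusterConfig struct {
    Name string
    CPUs int
}

type Node struct {
    Name string
    Port int
}

// Configurator mirrors the new two-argument shape above.
type Configurator func(ClusterConfig, Node) (interface{}, error)

var configurators = map[string]Configurator{
    "mock": func(cc ClusterConfig, n Node) (interface{}, error) {
        // cluster-wide values come from cc, per-node values from n
        return fmt.Sprintf("cluster=%s cpus=%d machine=%s apiserver-port=%d",
            cc.Name, cc.CPUs, n.Name, n.Port), nil
    },
}

func main() {
    d, err := configurators["mock"](ClusterConfig{Name: "minikube", CPUs: 2},
        Node{Name: "minikube2", Port: 8443})
    if err != nil {
        panic(err)
    }
    fmt.Println(d)
}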