Merge branch 'master' of https://github.com/kubernetes/minikube into mk-image-load
commit 0061d72be6

OWNERS
@@ -18,6 +18,7 @@ approvers:
 - medyagh
 - josedonizetti
 - priyawadhwa
+- ilya-zuyev
 emeritus_approvers:
 - dlorenc
 - luxas
@@ -176,6 +176,8 @@ func runStart(cmd *cobra.Command, args []string) {

 	if existing != nil {
 		upgradeExistingConfig(existing)
+	} else {
+		validateProfileName()
 	}

 	validateSpecifiedDriver(existing)
@@ -638,6 +640,25 @@ func hostDriver(existing *config.ClusterConfig) string {
 	return h.Driver.DriverName()
 }

+// validateProfileName makes sure the new profile name is not a duplicate of any machine name in existing multi-node clusters.
+func validateProfileName() {
+	profiles, err := config.ListValidProfiles()
+	if err != nil {
+		exit.Message(reason.InternalListConfig, "Unable to list profiles: {{.error}}", out.V{"error": err})
+	}
+	for _, p := range profiles {
+		for _, n := range p.Config.Nodes {
+			machineName := config.MachineName(*p.Config, n)
+			if ClusterFlagValue() == machineName {
+				out.WarningT("Profile name '{{.name}}' is duplicated with machine name '{{.machine}}' in profile '{{.profile}}'", out.V{"name": ClusterFlagValue(),
+					"machine": machineName,
+					"profile": p.Name})
+				exit.Message(reason.Usage, "Profile name should be unique")
+			}
+		}
+	}
+}
+
 // validateSpecifiedDriver makes sure that if a user has passed in a driver
 // it matches the existing cluster if there is one
 func validateSpecifiedDriver(existing *config.ClusterConfig) {
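Why this check matters: minikube derives a machine name from the cluster (profile) name plus the node name, so a user-chosen profile name can collide with a machine that an existing multi-node cluster already owns. A minimal sketch of the collision, using a simplified `machineName` helper standing in for `config.MachineName` (all names here are illustrative):

```go
package main

import "fmt"

// machineName is a simplified stand-in for config.MachineName:
// secondary nodes are named "<cluster>-<node>".
func machineName(cluster, node string, controlPlane bool) string {
	if controlPlane {
		return cluster
	}
	return fmt.Sprintf("%s-%s", cluster, node)
}

func main() {
	// A two-node cluster "minikube" owns machines "minikube" and "minikube-m02".
	fmt.Println(machineName("minikube", "m02", false)) // minikube-m02

	// A brand-new profile literally named "minikube-m02" would therefore
	// collide with that machine, which is what validateProfileName rejects.
}
```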
@@ -1097,6 +1118,20 @@ func validateFlags(cmd *cobra.Command, drvName string) {
 		}
 	}

+	if driver.IsSSH(drvName) {
+		sshIPAddress := viper.GetString(sshIPAddress)
+		if sshIPAddress == "" {
+			exit.Message(reason.Usage, "No IP address provided. Try specifying --ssh-ip-address, or see https://minikube.sigs.k8s.io/docs/drivers/ssh/")
+		}
+
+		if net.ParseIP(sshIPAddress) == nil {
+			_, err := net.LookupIP(sshIPAddress)
+			if err != nil {
+				exit.Error(reason.Usage, "Could not resolve IP address", err)
+			}
+		}
+	}
+
 	// validate kubeadm extra args
 	if invalidOpts := bsutil.FindInvalidExtraConfigFlags(config.ExtraOptions); len(invalidOpts) > 0 {
 		out.WarningT(
@@ -24,6 +24,7 @@ import (
 	"os/exec"
 	"runtime"
 	"strings"
+	"sync"
 	"time"

 	"k8s.io/klog/v2"
@@ -32,6 +33,9 @@ import (
 	"k8s.io/minikube/pkg/minikube/style"
 )

+var warnLock sync.Mutex
+var alreadyWarnedCmds = make(map[string]bool)
+
 // RunResult holds the results of a Runner
 type RunResult struct {
 	Stdout bytes.Buffer
@@ -133,10 +137,19 @@ func runCmd(cmd *exec.Cmd, warnSlow ...bool) (*RunResult, error) {
 	elapsed := time.Since(start)
 	if warn {
 		if elapsed > warnTime {
-			out.WarningT(`Executing "{{.command}}" took an unusually long time: {{.duration}}`, out.V{"command": rr.Command(), "duration": elapsed})
-			// Don't show any restarting hint, when running podman locally (on linux, with sudo). Only when having a service.
-			if cmd.Args[0] != "sudo" {
-				out.ErrT(style.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": cmd.Args[0]})
+			warnLock.Lock()
+			_, ok := alreadyWarnedCmds[rr.Command()]
+			if !ok {
+				alreadyWarnedCmds[rr.Command()] = true
+			}
+			warnLock.Unlock()
+
+			if !ok {
+				out.WarningT(`Executing "{{.command}}" took an unusually long time: {{.duration}}`, out.V{"command": rr.Command(), "duration": elapsed})
+				// Don't show a restarting hint when running podman locally (on Linux, with sudo); only when it runs as a service.
+				if cmd.Args[0] != "sudo" {
+					out.ErrT(style.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": cmd.Args[0]})
+				}
 			}
 		}
 	}
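The rewritten block is a warn-once pattern: record each command string in a map guarded by a mutex, and only the caller that first inserts the key prints the warning. A self-contained sketch of the same pattern, assuming nothing about minikube's types:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	mu     sync.Mutex
	warned = make(map[string]bool)
)

// warnOnce reports true only the first time it sees key, so concurrent
// callers produce exactly one warning per distinct command.
func warnOnce(key string) bool {
	mu.Lock()
	defer mu.Unlock()
	if warned[key] {
		return false
	}
	warned[key] = true
	return true
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if warnOnce("sleep 3") {
				fmt.Println(`Executing "sleep 3" took an unusually long time`)
			}
		}()
	}
	wg.Wait() // the warning prints exactly once
}
```

Note the map is keyed by the full command line (`rr.Command()` in the real code), so two different slow commands each still warn once.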
@@ -0,0 +1,60 @@
+/*
+Copyright 2020 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package oci
+
+import (
+	"os/exec"
+	"runtime"
+	"strings"
+	"testing"
+
+	"k8s.io/minikube/pkg/minikube/out"
+	"k8s.io/minikube/pkg/minikube/tests"
+)
+
+func TestRunCmdWarnSlowOnce(t *testing.T) {
+	if runtime.GOOS != "linux" {
+		return
+	}
+	f1 := tests.NewFakeFile()
+	out.SetErrFile(f1)
+
+	cmd := exec.Command("sleep", "3")
+	_, err := runCmd(cmd, true)
+
+	if err != nil {
+		t.Errorf("runCmd has error: %v", err)
+	}
+
+	if !strings.Contains(f1.String(), "Executing \"sleep 3\" took an unusually long time") {
+		t.Errorf("runCmd did not print the expected warning, printed instead: %v", f1.String())
+	}
+
+	f2 := tests.NewFakeFile()
+	out.SetErrFile(f2)
+
+	cmd = exec.Command("sleep", "3")
+	_, err = runCmd(cmd, true)
+
+	if err != nil {
+		t.Errorf("runCmd has error: %v", err)
+	}
+
+	if strings.Contains(f2.String(), "Executing \"sleep 3\" took an unusually long time") {
+		t.Errorf("runCmd printed the warning a second time: %v", f2.String())
+	}
+}
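The test works by swapping minikube's error stream for an in-memory file and asserting on what was written. A generic version of that capture technique, using bytes.Buffer in place of tests.NewFakeFile (the errSink variable and logSlow function are hypothetical stand-ins):

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
)

// errSink plays the role of the stream that out.SetErrFile configures.
var errSink *bytes.Buffer

// logSlow stands in for the warning that runCmd emits.
func logSlow(cmd string) {
	fmt.Fprintf(errSink, "Executing %q took an unusually long time\n", cmd)
}

func main() {
	errSink = &bytes.Buffer{} // redirect, as out.SetErrFile(f1) does
	logSlow("sleep 3")
	if strings.Contains(errSink.String(), "unusually long time") {
		fmt.Println("warning captured") // mirrors the test's first assertion
	}
}
```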
@@ -207,8 +207,9 @@ func ListProfiles(miniHome ...string) (validPs []*Profile, inValidPs []*Profile, err error) {
 	if err == nil {
 		pDirs = append(pDirs, cs...)
 	}
-	pDirs = removeDupes(pDirs)
-	for _, n := range pDirs {
+
+	nodeNames := map[string]bool{}
+	for _, n := range removeDupes(pDirs) {
 		p, err := LoadProfile(n, miniHome...)
 		if err != nil {
 			inValidPs = append(inValidPs, p)
@@ -219,11 +220,35 @@ func ListProfiles(miniHome ...string) (validPs []*Profile, inValidPs []*Profile, err error) {
 			continue
 		}
 		validPs = append(validPs, p)
+
+		for _, child := range p.Config.Nodes {
+			nodeNames[MachineName(*p.Config, child)] = true
+		}
 	}
+
+	inValidPs = removeChildNodes(inValidPs, nodeNames)
 	return validPs, inValidPs, nil
 }

-// removeDupes removes duplicates
+// ListValidProfiles returns profiles in the minikube home dir.
+// Unlike `ListProfiles`, this function doesn't try to get profiles from containers.
+func ListValidProfiles(miniHome ...string) (ps []*Profile, err error) {
+	// try to get the profile list based on leftover evidence such as directories
+	pDirs, err := profileDirs(miniHome...)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, n := range pDirs {
+		p, err := LoadProfile(n, miniHome...)
+		if err == nil && p.IsValid() {
+			ps = append(ps, p)
+		}
+	}
+	return ps, nil
+}
+
+// removeDupes removes duplicates
 func removeDupes(profiles []string) []string {
 	// Use map to record duplicates as we find them.
 	seen := map[string]bool{}
@@ -243,6 +268,18 @@ func removeDupes(profiles []string) []string {
 	return result
 }

+// removeChildNodes removes invalid profiles that share a name with any sub-node's machine name.
+// It returns nil if no invalid profiles remain.
+func removeChildNodes(inValidPs []*Profile, nodeNames map[string]bool) (ps []*Profile) {
+	for _, p := range inValidPs {
+		if _, ok := nodeNames[p.Name]; !ok {
+			ps = append(ps, p)
+		}
+	}
+
+	return ps
+}
+
 // LoadProfile loads type Profile based on its name
 func LoadProfile(name string, miniHome ...string) (*Profile, error) {
 	cfg, err := DefaultLoader.LoadConfigFromFile(name, miniHome...)
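The effect of removeChildNodes is that per-node machine directories (for example `multinode-m02`) are no longer reported as invalid profiles when listing. A minimal sketch, assuming Profile only needs a Name for this purpose:

```go
package main

import "fmt"

type profile struct{ Name string } // simplified stand-in for config.Profile

func removeChildNodes(invalid []*profile, nodeNames map[string]bool) (ps []*profile) {
	for _, p := range invalid {
		if !nodeNames[p.Name] {
			ps = append(ps, p)
		}
	}
	return ps
}

func main() {
	// "multinode-m02" is a machine dir owned by cluster "multinode",
	// not a broken profile, so it is filtered out of the invalid list.
	invalid := []*profile{{Name: "multinode-m02"}, {Name: "stale"}}
	nodes := map[string]bool{"multinode-m02": true}
	for _, p := range removeChildNodes(invalid, nodes) {
		fmt.Println(p.Name) // stale
	}
}
```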
@@ -291,7 +328,7 @@ func ProfileFolderPath(profile string, miniHome ...string) string {
 // MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names
 func MachineName(cc ClusterConfig, n Node) string {
 	// For single node cluster, default back to old naming
-	if len(cc.Nodes) == 1 || n.ControlPlane {
+	if (len(cc.Nodes) == 1 && cc.Nodes[0].Name == n.Name) || n.ControlPlane {
 		return cc.Name
 	}
 	return fmt.Sprintf("%s-%s", cc.Name, n.Name)
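The extra `cc.Nodes[0].Name == n.Name` condition changes behavior only when a node that is not the configured one is passed alongside a single-node cluster; previously any such call collapsed to the bare cluster name. A sketch of the before/after difference with simplified types (not minikube's):

```go
package main

import "fmt"

type node struct {
	Name         string
	ControlPlane bool
}

type clusterConfig struct {
	Name  string
	Nodes []node
}

func machineName(cc clusterConfig, n node) string {
	// New rule: a lone node inherits the bare cluster name only when it
	// really is that configured node (or a control plane).
	if (len(cc.Nodes) == 1 && cc.Nodes[0].Name == n.Name) || n.ControlPlane {
		return cc.Name
	}
	return fmt.Sprintf("%s-%s", cc.Name, n.Name)
}

func main() {
	cc := clusterConfig{Name: "p1", Nodes: []node{{Name: "m02"}}}
	fmt.Println(machineName(cc, cc.Nodes[0]))       // p1 (unchanged)
	fmt.Println(machineName(cc, node{Name: "m03"})) // p1-m03 (old code returned p1)
}
```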
@@ -146,10 +146,8 @@ func TestStartHostExists(t *testing.T) {
 	mc := defaultClusterConfig
 	mc.Name = ih.Name

-	n := config.Node{Name: ih.Name}
-
 	// This should pass without calling Create because the host exists already.
-	h, _, err := StartHost(api, &mc, &n)
+	h, _, err := StartHost(api, &mc, &(mc.Nodes[0]))
 	if err != nil {
 		t.Fatalf("Error starting host: %v", err)
 	}
@@ -38,6 +38,24 @@ const (

 // Add adds a new node config to an existing cluster.
 func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error {
+	profiles, err := config.ListValidProfiles()
+	if err != nil {
+		return err
+	}
+
+	machineName := config.MachineName(*cc, n)
+	for _, p := range profiles {
+		if p.Config.Name == cc.Name {
+			continue
+		}
+
+		for _, existNode := range p.Config.Nodes {
+			if machineName == config.MachineName(*p.Config, existNode) {
+				return errors.Errorf("Node %s already exists in %s profile", machineName, p.Name)
+			}
+		}
+	}
+
 	if err := config.SaveNode(cc, &n); err != nil {
 		return errors.Wrap(err, "save node")
 	}
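This mirrors the start-side check: before saving, the machine name the new node would receive is compared against every machine owned by every other profile. A compact sketch of that scan with hypothetical profile data:

```go
package main

import "fmt"

func main() {
	// config.MachineName(*cc, n) for cluster "p1", node "m02":
	newMachine := "p1-m02"

	// Hypothetical other profiles and the machine names they own.
	others := map[string][]string{
		"p1-m02": {"p1-m02"}, // a standalone profile that already owns this name
	}

	for profile, machines := range others {
		for _, m := range machines {
			if m == newMachine {
				fmt.Printf("Node %s already exists in %s profile\n", newMachine, profile)
			}
		}
	}
}
```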
@@ -64,7 +64,9 @@ See [Building the minikube ISO](../iso)
 We publish CI builds of minikube, built at every Pull Request. Builds are available at (substitute in the relevant PR number):

 - <https://storage.googleapis.com/minikube-builds/PR_NUMBER/minikube-darwin-amd64>
+- <https://storage.googleapis.com/minikube-builds/PR_NUMBER/minikube-darwin-arm64>
 - <https://storage.googleapis.com/minikube-builds/PR_NUMBER/minikube-linux-amd64>
+- <https://storage.googleapis.com/minikube-builds/PR_NUMBER/minikube-linux-arm64>
 - <https://storage.googleapis.com/minikube-builds/PR_NUMBER/minikube-windows-amd64.exe>

 We also publish CI builds of minikube-iso, built at every Pull Request that touches deploy/iso/minikube-iso. Builds are available at:
@@ -23,8 +23,6 @@ The Docker driver allows you to install Kubernetes into an existing Docker install
 - [userns-remap](https://docs.docker.com/engine/security/userns-remap/)
 - [rootless](https://docs.docker.com/engine/security/rootless/)

-- Docker driver is not supported on non-amd64 architectures such as arm yet. For non-amd64 archs please use [other drivers]({{< ref "/docs/drivers/_index.md" >}})
-
 - On macOS, containers might get hung and require a restart of Docker for Desktop. See [docker/for-mac#1835](https://github.com/docker/for-mac/issues/1835)

 - The `ingress`, and `ingress-dns` addons are currently only supported on Linux. See [#7332](https://github.com/kubernetes/minikube/issues/7332)
@@ -1,7 +1,7 @@
 ## Requirements

 - [Install Docker](https://hub.docker.com/search?q=&type=edition&offering=community&sort=updated_at&order=desc) 18.09 or higher
-- amd64 system.
+- amd64 or arm64 system.

 ## Usage
@@ -90,11 +90,20 @@ brew link minikube

 Otherwise, download minikube directly:

+### x86
+
 ```shell
 curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64
 sudo install minikube-darwin-amd64 /usr/local/bin/minikube
 ```

+### ARM
+
+```shell
+curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-arm64
+sudo install minikube-darwin-arm64 /usr/local/bin/minikube
+```
+
 {{% /mactab %}}
 {{% windowstab %}}
@@ -20,10 +20,13 @@ package integration

 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"os/exec"
 	"strings"
 	"testing"
+
+	"k8s.io/minikube/pkg/minikube/config"
 )

 func TestMultiNode(t *testing.T) {
@@ -43,11 +46,13 @@ func TestMultiNode(t *testing.T) {
 	}{
 		{"FreshStart2Nodes", validateMultiNodeStart},
 		{"AddNode", validateAddNodeToMultiNode},
+		{"ProfileList", validateProfileListWithMultiNode},
 		{"StopNode", validateStopRunningNode},
 		{"StartAfterStop", validateStartNodeAfterStop},
 		{"DeleteNode", validateDeleteNodeFromMultiNode},
 		{"StopMultiNode", validateStopMultiNodeCluster},
 		{"RestartMultiNode", validateRestartMultiNodeCluster},
+		{"ValidateNameConflict", validateNameConflict},
 	}
 	for _, tc := range tests {
 		tc := tc
@@ -109,6 +114,44 @@ func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile string) {
 	}
 }

+func validateProfileListWithMultiNode(ctx context.Context, t *testing.T, profile string) {
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
+	if err != nil {
+		t.Errorf("failed to list profiles with json format. args %q: %v", rr.Command(), err)
+	}
+
+	var jsonObject map[string][]config.Profile
+	err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
+	if err != nil {
+		t.Errorf("failed to decode json from profile list: args %q: %v", rr.Command(), err)
+	}
+
+	validProfiles := jsonObject["valid"]
+	var profileObject *config.Profile
+	for _, obj := range validProfiles {
+		if obj.Name == profile {
+			profileObject = &obj
+			break
+		}
+	}
+
+	if profileObject == nil {
+		t.Errorf("expected the json of 'profile list' to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Command())
+	} else if expected, numNodes := 3, len(profileObject.Config.Nodes); expected != numNodes {
+		t.Errorf("expected profile %q in json of 'profile list' to include %d nodes but it has %d nodes. got *%q*. args: %q", profile, expected, numNodes, rr.Stdout.String(), rr.Command())
+	}
+
+	if invalidPs, ok := jsonObject["invalid"]; ok {
+		for _, ps := range invalidPs {
+			if strings.Contains(ps.Name, profile) {
+				t.Errorf("expected the json of 'profile list' to not list the profile or its nodes as invalid, but got *%q*. args: %q", rr.Stdout.String(), rr.Command())
+			}
+		}
+	}
+}
+
 func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) {
 	// Run minikube node stop on that node
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", ThirdNodeName))
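`minikube profile list --output json` prints an object keyed by `valid` and `invalid`, which is why the test unmarshals into `map[string][]config.Profile`. A minimal decoding sketch with a made-up payload (the `profile` struct here is a simplified stand-in):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type profile struct {
	Name string `json:"Name"`
}

func main() {
	// Shape assumed from the test: top-level "valid" and "invalid" lists.
	payload := []byte(`{"valid":[{"Name":"multinode"}],"invalid":[]}`)

	var out map[string][]profile
	if err := json.Unmarshal(payload, &out); err != nil {
		panic(err)
	}
	fmt.Println(len(out["valid"]), out["valid"][0].Name) // 1 multinode
}
```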
@@ -308,3 +351,39 @@ func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) {
 		t.Errorf("expected 2 nodes Ready status to be True, got %v", rr.Output())
 	}
 }
+
+func validateNameConflict(ctx context.Context, t *testing.T, profile string) {
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "node", "list", "-p", profile))
+	if err != nil {
+		t.Errorf("failed to run node list. args %q : %v", rr.Command(), err)
+	}
+	curNodeNum := strings.Count(rr.Stdout.String(), profile)
+
+	// Start a new profile. It is expected to fail: the name conflicts with an existing machine name.
+	profileName := fmt.Sprintf("%s-m0%d", profile, curNodeNum)
+	startArgs := append([]string{"start", "-p", profileName}, StartArgs()...)
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
+	if err == nil {
+		t.Errorf("expected start profile command to fail. args %q", rr.Command())
+	}
+
+	// Start a temporary profile whose name will conflict with the next node's machine name.
+	profileName = fmt.Sprintf("%s-m0%d", profile, curNodeNum+1)
+	startArgs = append([]string{"start", "-p", profileName}, StartArgs()...)
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
+	if err != nil {
+		t.Errorf("failed to start profile. args %q : %v", rr.Command(), err)
+	}
+
+	// Add a node to the current cluster. It is expected to fail: the machine name is taken by the temporary profile.
+	addArgs := []string{"node", "add", "-p", profile}
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), addArgs...))
+	if err == nil {
+		t.Errorf("expected add node command to fail. args %q : %v", rr.Command(), err)
+	}
+
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profileName))
+	if err != nil {
+		t.Logf("failed to clean up temporary profile. args %q : %v", rr.Command(), err)
+	}
+}