Check profile, node name to prevent duplication

Check the profile and node names before adding, to prevent conflicts with machine names in existing multi-node clusters. (branch: pull/10119/head)

parent f0c59ce8f7
commit a00d632930
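Before the hunks, a minimal standalone sketch of the failure mode this commit closes. The names used here (demo, m02) are illustrative, and machineName below simply mirrors the "<cluster>-<node>" format visible in the MachineName hunk further down; this is not minikube code.

// A minimal, self-contained sketch (not minikube code) of the collision being guarded against.
package main

import "fmt"

// machineName mirrors the "<cluster>-<node>" format that config.MachineName
// uses for secondary nodes (see the MachineName hunk further down).
func machineName(cluster, node string) string {
    return fmt.Sprintf("%s-%s", cluster, node)
}

func main() {
    // Hypothetical existing multi-node cluster "demo" with a second node "m02":
    existingMachine := machineName("demo", "m02") // "demo-m02"

    // A user now asks to start a new profile named "demo-m02".
    requestedProfile := "demo-m02"

    if requestedProfile == existingMachine {
        // This is the situation validateProfileName now rejects with
        // "Profile name should be unique".
        fmt.Printf("profile %q collides with machine %q of cluster %q\n",
            requestedProfile, existingMachine, "demo")
    }
}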
@@ -176,6 +176,8 @@ func runStart(cmd *cobra.Command, args []string) {
     if existing != nil {
         upgradeExistingConfig(existing)
+    } else {
+        validateProfileName()
     }
 
     validateSpecifiedDriver(existing)
@@ -638,6 +640,25 @@ func hostDriver(existing *config.ClusterConfig) string {
     return h.Driver.DriverName()
 }
 
+// validateProfileName makes sure that the new profile name is not duplicated with any of the machine names in existing multi-node clusters.
+func validateProfileName() {
+    profiles, err := config.ListValidProfiles()
+    if err != nil {
+        exit.Message(reason.InternalListConfig, "Unable to list profiles: {{.error}}", out.V{"error": err})
+    }
+    for _, p := range profiles {
+        for _, n := range p.Config.Nodes {
+            machineName := config.MachineName(*p.Config, n)
+            if ClusterFlagValue() == machineName {
+                out.WarningT("Profile name '{{.name}}' is duplicated with machine name '{{.machine}}' in profile '{{.profile}}'", out.V{"name": ClusterFlagValue(),
+                    "machine": machineName,
+                    "profile": p.Name})
+                exit.Message(reason.Usage, "Profile name should be unique")
+            }
+        }
+    }
+}
+
 // validateSpecifiedDriver makes sure that if a user has passed in a driver
 // it matches the existing cluster if there is one
 func validateSpecifiedDriver(existing *config.ClusterConfig) {
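User-visible effect of the hunk above, as exercised by the integration test at the end of this diff: starting a profile whose name matches any machine name of another profile now prints the "Profile name ... is duplicated with machine name ..." warning and exits with "Profile name should be unique" instead of proceeding.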
@@ -223,7 +223,25 @@ func ListProfiles(miniHome ...string) (validPs []*Profile, inValidPs []*Profile,
     return validPs, inValidPs, nil
 }
 
+// ListValidProfiles returns the profiles in the minikube home dir
+// Unlike `ListProfiles` this function doesn't try to get profiles from containers
+func ListValidProfiles(miniHome ...string) (ps []*Profile, err error) {
+    // try to get the profile list based on leftover evidence such as directories
+    pDirs, err := profileDirs(miniHome...)
+    if err != nil {
+        return nil, err
+    }
+
+    for _, n := range pDirs {
+        p, err := LoadProfile(n, miniHome...)
+        if err == nil && p.IsValid() {
+            ps = append(ps, p)
+        }
+    }
+    return ps, nil
+}
+
 // removeDupes removes duplicates
 func removeDupes(profiles []string) []string {
     // Use map to record duplicates as we find them.
     seen := map[string]bool{}
@@ -291,7 +309,7 @@ func ProfileFolderPath(profile string, miniHome ...string) string {
 // MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names
 func MachineName(cc ClusterConfig, n Node) string {
     // For a single-node cluster, default back to the old naming
-    if len(cc.Nodes) == 1 || n.ControlPlane {
+    if (len(cc.Nodes) == 1 && cc.Nodes[0].Name == n.Name) || n.ControlPlane {
         return cc.Name
     }
     return fmt.Sprintf("%s-%s", cc.Name, n.Name)
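To make the one-line change above concrete, here is a standalone sketch with trimmed-down stand-ins for ClusterConfig and Node (illustrative code, not the minikube types): the old condition collapsed every single-node cluster to the bare profile name no matter which node was asked about, while the new one does so only when the queried node really is that cluster's single node (or a control plane).

package main

import "fmt"

// Simplified stand-ins for config.Node and config.ClusterConfig (sketch only).
type node struct {
    Name         string
    ControlPlane bool
}

type clusterConfig struct {
    Name  string
    Nodes []node
}

// machineNameOld reproduces the condition removed by the hunk above.
func machineNameOld(cc clusterConfig, n node) string {
    if len(cc.Nodes) == 1 || n.ControlPlane {
        return cc.Name
    }
    return fmt.Sprintf("%s-%s", cc.Name, n.Name)
}

// machineNameNew reproduces the condition added by the hunk above.
func machineNameNew(cc clusterConfig, n node) string {
    if (len(cc.Nodes) == 1 && cc.Nodes[0].Name == n.Name) || n.ControlPlane {
        return cc.Name
    }
    return fmt.Sprintf("%s-%s", cc.Name, n.Name)
}

func main() {
    // A single-node cluster asked about a worker node that is not its one configured node.
    cc := clusterConfig{Name: "p1", Nodes: []node{{Name: "m01", ControlPlane: true}}}
    other := node{Name: "m02"}

    fmt.Println(machineNameOld(cc, other)) // "p1"     (old: any single-node cluster wins)
    fmt.Println(machineNameNew(cc, other)) // "p1-m02" (new: only the cluster's own node does)
}

This is presumably also why TestStartHostExists in the next hunk now passes &(mc.Nodes[0]) instead of a freshly built node: under the new rule only the configured node maps back to the profile name.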
@@ -146,10 +146,8 @@ func TestStartHostExists(t *testing.T) {
     mc := defaultClusterConfig
     mc.Name = ih.Name
 
-    n := config.Node{Name: ih.Name}
-
     // This should pass without calling Create because the host exists already.
-    h, _, err := StartHost(api, &mc, &n)
+    h, _, err := StartHost(api, &mc, &(mc.Nodes[0]))
     if err != nil {
         t.Fatalf("Error starting host: %v", err)
     }
@@ -38,6 +38,24 @@ const (
 
 // Add adds a new node config to an existing cluster.
 func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error {
+    profiles, err := config.ListValidProfiles()
+    if err != nil {
+        return err
+    }
+
+    machineName := config.MachineName(*cc, n)
+    for _, p := range profiles {
+        if p.Config.Name == cc.Name {
+            continue
+        }
+
+        for _, existNode := range p.Config.Nodes {
+            if machineName == config.MachineName(*p.Config, existNode) {
+                return errors.Errorf("Node %s already exists in %s profile", machineName, p.Name)
+            }
+        }
+    }
+
     if err := config.SaveNode(cc, &n); err != nil {
         return errors.Wrap(err, "save node")
     }
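A note on the Add hunk above: the outer loop skips the profile being modified (p.Config.Name == cc.Name), so a cluster's own machines never trip the check when adding a node to it; only a machine name already owned by a different profile makes Add return an error before SaveNode is reached.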
@@ -48,6 +48,7 @@ func TestMultiNode(t *testing.T) {
         {"DeleteNode", validateDeleteNodeFromMultiNode},
         {"StopMultiNode", validateStopMultiNodeCluster},
         {"RestartMultiNode", validateRestartMultiNodeCluster},
+        {"ValidateNameConflict", validatNameConflict},
     }
     for _, tc := range tests {
         tc := tc
@@ -308,3 +309,39 @@ func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile
         t.Errorf("expected 2 nodes Ready status to be True, got %v", rr.Output())
     }
 }
+
+func validatNameConflict(ctx context.Context, t *testing.T, profile string) {
+    rr, err := Run(t, exec.CommandContext(ctx, Target(), "node", "list", "-p", profile))
+    if err != nil {
+        t.Errorf("failed to run node list. args %q : %v", rr.Command(), err)
+    }
+    curNodeNum := strings.Count(rr.Stdout.String(), profile)
+
+    // Start a new profile whose name collides with an existing machine name. It is expected to fail.
+    profileName := fmt.Sprintf("%s-m0%d", profile, curNodeNum)
+    startArgs := append([]string{"start", "-p", profileName}, StartArgs()...)
+    rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
+    if err == nil {
+        t.Errorf("expected start profile command to fail. args %q", rr.Command())
+    }
+
+    // Start a temporary profile whose name will conflict with the next node's machine name.
+    profileName = fmt.Sprintf("%s-m0%d", profile, curNodeNum+1)
+    startArgs = append([]string{"start", "-p", profileName}, StartArgs()...)
+    rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
+    if err != nil {
+        t.Errorf("failed to start profile. args %q : %v", rr.Command(), err)
+    }
+
+    // Add a node to the current cluster. It is expected to fail because of the conflicting machine name.
+    addArgs := []string{"node", "add", "-p", profile}
+    rr, err = Run(t, exec.CommandContext(ctx, Target(), addArgs...))
+    if err == nil {
+        t.Errorf("expected add node command to fail. args %q : %v", rr.Command(), err)
+    }
+
+    rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profileName))
+    if err != nil {
+        t.Logf("failed to clean temporary profile. args %q : %v", rr.Command(), err)
+    }
+}