diff --git a/go.mod b/go.mod
index e8000701b0..f0b6382a9d 100644
--- a/go.mod
+++ b/go.mod
@@ -94,6 +94,7 @@ require (
 	k8s.io/api v0.17.3
 	k8s.io/apimachinery v0.17.3
 	k8s.io/client-go v0.17.3
+	k8s.io/klog v1.0.0
 	k8s.io/kubectl v0.0.0
 	k8s.io/kubernetes v1.17.3
 	k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab // indirect
diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go
index 0b7704e752..5408744837 100644
--- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go
+++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go
@@ -133,11 +133,7 @@ func (k *Bootstrapper) createCompatSymlinks() error {

 // clearStaleConfigs clears configurations which may have stale IP addresses
 func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error {
-	cp, err := config.PrimaryControlPlane(&cfg)
-	if err != nil {
-		return err
-	}
-
+	// These are the files that kubeadm will reject stale versions of
 	paths := []string{
 		"/etc/kubernetes/admin.conf",
 		"/etc/kubernetes/kubelet.conf",
@@ -145,6 +141,19 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error {
 		"/etc/kubernetes/scheduler.conf",
 	}

+	args := append([]string{"ls", "-la"}, paths...)
+	rr, err := k.c.RunCmd(exec.Command("sudo", args...))
+	if err != nil {
+		glog.Infof("config check failed, skipping stale config cleanup: %v", err)
+		return nil
+	}
+	glog.Infof("found existing configuration files:\n%s\n", rr.Stdout.String())
+
+	cp, err := config.PrimaryControlPlane(&cfg)
+	if err != nil {
+		return err
+	}
+
 	endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(constants.ControlPlaneAlias, strconv.Itoa(cp.Port)))
 	for _, path := range paths {
 		_, err := k.c.RunCmd(exec.Command("sudo", "grep", endpoint, path))
@@ -469,7 +478,8 @@ func (k *Bootstrapper) needsReconfigure(conf string, hostname string, port int,
 		return true
 	}

-	glog.Infof("The running cluster does not need reconfiguration. hostname: %s", hostname)
+	// DANGER: This log message is hard-coded in an integration test!
+	glog.Infof("The running cluster does not require reconfiguration: %s", hostname)
 	return false
 }

@@ -829,7 +839,7 @@ func (k *Bootstrapper) applyKICOverlay(cfg config.ClusterConfig) error {
 		return err
 	}

-	ko := path.Join(vmpath.GuestEphemeralDir, fmt.Sprintf("kic_overlay.yaml"))
+	ko := path.Join(vmpath.GuestEphemeralDir, "kic_overlay.yaml")

 	f := assets.NewMemoryAssetTarget(b.Bytes(), ko, "0644")
 	if err := k.c.Copy(f); err != nil {
@@ -899,7 +909,7 @@ func (k *Bootstrapper) elevateKubeSystemPrivileges(cfg config.ClusterConfig) err
 	rr, err := k.c.RunCmd(cmd)
 	if err != nil {
 		// Error from server (AlreadyExists): clusterrolebindings.rbac.authorization.k8s.io "minikube-rbac" already exists
-		if strings.Contains(rr.Output(), fmt.Sprintf("Error from server (AlreadyExists)")) {
+		if strings.Contains(rr.Output(), "Error from server (AlreadyExists)") {
 			glog.Infof("rbac %q already exists not need to re-create.", rbacName)
 			return nil
 		}
diff --git a/test/integration/pause_test.go b/test/integration/pause_test.go
index e0d98a0f6f..5e22318a68 100644
--- a/test/integration/pause_test.go
+++ b/test/integration/pause_test.go
@@ -65,22 +65,22 @@ func validateFreshStart(ctx context.Context, t *testing.T, profile string) {
 	}
 }

-// validateStartNoReset validates that starting a running cluster won't invoke a reset
-func validateStartNoReset(ctx context.Context, t *testing.T, profile string) {
+// validateStartNoReconfigure validates that starting a running cluster does not invoke reconfiguration
+func validateStartNoReconfigure(ctx context.Context, t *testing.T, profile string) {
 	args := []string{"start", "-p", profile, "--alsologtostderr", "-v=5"}
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
 	if err != nil {
 		defer clusterLogs(t, profile)
 		t.Fatalf("failed to second start a running minikube with args: %q : %v", rr.Command(), err)
 	}
+
 	if !NoneDriver() {
-		softLog := "The running cluster does not need a reset"
+		softLog := "The running cluster does not require reconfiguration"
 		if !strings.Contains(rr.Output(), softLog) {
 			defer clusterLogs(t, profile)
-			t.Errorf("expected the second start log outputs to include %q but got: %s", softLog, rr.Output())
+			t.Errorf("expected the second start log output to include %q but got: %s", softLog, rr.Output())
 		}
 	}
-
 }

 func validatePause(ctx context.Context, t *testing.T, profile string) {