persist mount settings after stop

pull/12719/head
Steven Powell 2021-10-15 14:42:56 -07:00
parent 0319d4d673
commit d7decc2a8d
6 changed files with 37 additions and 11 deletions
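In short: --mount and --mount-string are now recorded in the cluster's ClusterConfig, and configureMounts receives those values from its caller instead of reading the viper flag state, so a mount requested on the original "minikube start" is recreated when the cluster is started again after a stop. A minimal sketch of the intended round-trip, assuming the profile is read back with the existing config.Load helper (the profile name and error handling here are illustrative, not part of this commit):

	package main

	import (
		"fmt"

		"k8s.io/minikube/pkg/minikube/config"
	)

	func main() {
		// The profile is written on the first `minikube start --mount ...`;
		// after a stop/start cycle the two new fields come from this file, not from flags.
		cc, err := config.Load("minikube")
		if err != nil {
			fmt.Println("could not load profile:", err)
			return
		}
		if cc.Mount {
			// MountString holds the "<source>:<target>" value given at creation time.
			fmt.Println("mount will be recreated with:", cc.MountString)
		}
	}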

View File

@ -458,6 +458,8 @@ func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName s
SSHPort: viper.GetInt(sshSSHPort),
ExtraDisks: viper.GetInt(extraDisks),
CertExpiration: viper.GetDuration(certExpiration),
+ Mount: viper.GetBool(createMount),
+ MountString: viper.GetString(mountString),
KubernetesConfig: config.KubernetesConfig{
KubernetesVersion: k8sVersion,
ClusterName: ClusterFlagValue(),
@ -659,6 +661,8 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
updateBoolFromFlag(cmd, &cc.KubernetesConfig.ShouldLoadCachedImages, cacheImages)
updateIntFromFlag(cmd, &cc.KubernetesConfig.NodePort, apiServerPort)
updateDurationFromFlag(cmd, &cc.CertExpiration, certExpiration)
+ updateBoolFromFlag(cmd, &cc.Mount, createMount)
+ updateStringFromFlag(cmd, &cc.MountString, mountString)
if cmd.Flags().Changed(kubernetesVersion) {
cc.KubernetesConfig.KubernetesVersion = getKubernetesVersion(existing)

View File

@ -85,6 +85,8 @@ type ClusterConfig struct {
MultiNodeRequested bool
ExtraDisks int // currently only implemented for hyperkit and kvm2
CertExpiration time.Duration
+ Mount bool
+ MountString string
}
// KubernetesConfig contains the parameters used to configure the VM Kubernetes.
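ClusterConfig is what gets serialized to the profile's config.json, so the two new fields survive a minikube stop. An illustrative sketch of that round-trip (values made up; the standard k8s.io/minikube/pkg/minikube/config package is assumed):

	package main

	import (
		"encoding/json"
		"fmt"

		"k8s.io/minikube/pkg/minikube/config"
	)

	func main() {
		// In practice these fields are filled in from --mount/--mount-string
		// by generateNewConfigFromFlags (first file in this commit).
		cc := config.ClusterConfig{
			Name:        "minikube",
			Mount:       true,
			MountString: "/home/user/src:/minikube-host",
		}
		out, _ := json.MarshalIndent(cc, "", "  ")
		fmt.Println(string(out)) // the saved profile now carries Mount and MountString
	}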

View File

@ -51,22 +51,22 @@ func showVersionInfo(k8sVersion string, cr cruntime.Manager) {
}
// configureMounts configures any requested filesystem mounts
- func configureMounts(wg *sync.WaitGroup) {
+ func configureMounts(wg *sync.WaitGroup, mount bool, mountString string) {
wg.Add(1)
defer wg.Done()
- if !viper.GetBool(createMount) {
+ if !mount {
return
}
- out.Step(style.Mounting, "Creating mount {{.name}} ...", out.V{"name": viper.GetString(mountString)})
+ out.Step(style.Mounting, "Creating mount {{.name}} ...", out.V{"name": mountString})
path := os.Args[0]
mountDebugVal := 0
if klog.V(8).Enabled() {
mountDebugVal = 1
}
profile := viper.GetString("profile")
- mountCmd := exec.Command(path, "mount", "-p", profile, fmt.Sprintf("--v=%d", mountDebugVal), viper.GetString(mountString))
+ mountCmd := exec.Command(path, "mount", "-p", profile, fmt.Sprintf("--v=%d", mountDebugVal), mountString)
mountCmd.Env = append(os.Environ(), constants.IsMinikubeChildProcess+"=true")
if klog.V(8).Enabled() {
mountCmd.Stdout = os.Stdout
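Why the signature change: viper.GetBool(createMount) and viper.GetString(mountString) only reflect flags passed on the current command line, so a plain "minikube start" after a stop treated the mount as not requested and never recreated it. The values now come from the caller, which takes them from the persisted ClusterConfig (see the node start hunk below). A hypothetical caller in the same package, with cc standing in for the loaded config (illustrative fragment, not part of this commit):

	var wg sync.WaitGroup
	configureMounts(&wg, cc.Mount, cc.MountString) // values come from the saved profile, not from flags
	wg.Wait()                                      // configureMounts does its own wg.Add(1)/wg.Done()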

View File

@ -32,12 +32,6 @@ import (
"k8s.io/minikube/pkg/minikube/machine"
)
- // TODO: Share these between cluster and node packages
- const (
- 	mountString = "mount-string"
- 	createMount = "mount"
- )
// Add adds a new node config to an existing cluster.
func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error {
profiles, err := config.ListValidProfiles()

View File

@ -167,7 +167,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
var wg sync.WaitGroup
if !driver.IsKIC(starter.Cfg.Driver) {
- go configureMounts(&wg)
+ go configureMounts(&wg, starter.Cfg.Mount, starter.Cfg.MountString)
}
wg.Add(1)

View File

@ -53,6 +53,9 @@ func TestMountStart(t *testing.T) {
{"VerifyMountSecond", validateMount, profile2},
{"DeleteFirst", validateDelete, profile1},
{"VerifyMountPostDelete", validateMount, profile2},
{"Stop", validateMountStop, profile2},
{"RestartStopped", validateRestart, profile2},
{"VerifyMountPostStop", validateMount, profile2},
}
for _, test := range tests {
@ -72,6 +75,7 @@ func validateStartWithMount(ctx context.Context, t *testing.T, profile string) {
defer PostMortemLogs(t, profile)
args := []string{"start", "-p", profile, "--memory=2048", "--mount"}
+ args = append(args, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err)
@ -88,3 +92,25 @@ func validateMount(ctx context.Context, t *testing.T, profile string) {
t.Fatalf("mount failed: %q : %v", rr.Command(), err)
}
}
+ // validateMountStop stops a cluster
+ func validateMountStop(ctx context.Context, t *testing.T, profile string) {
+ 	defer PostMortemLogs(t, profile)
+ 	args := []string{"stop", "-p", profile}
+ 	rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
+ 	if err != nil {
+ 		t.Fatalf("stop failed: %q : %v", rr.Command(), err)
+ 	}
+ }
+ 
+ // validateRestart restarts a cluster
+ func validateRestart(ctx context.Context, t *testing.T, profile string) {
+ 	defer PostMortemLogs(t, profile)
+ 	args := []string{"start", "-p", profile}
+ 	rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
+ 	if err != nil {
+ 		t.Fatalf("restart failed: %q : %v", rr.Command(), err)
+ 	}
+ }