build: address all lint issues by v2 (#20804)

* fix QF1011: could omit type *os.File from declaration; it will be inferred from the right-hand side

* fix QF1012: Use fmt.Fprintf(x, ...) instead of x.Write(fmt.Sprintf(...))

* fix QF1001: could apply De Morgan's law

* fix QF1003: could use tagged switch

* fix weakCond: suspicious `cc.Nodes != nil && cc.Nodes[0].Name == node.Name`; nil check may not be enough, check for len (gocritic)

* fix docStub: silencing go lint doc-comment warnings is unadvised

* fix builtinShadow: shadowing of predeclared identifier: error

* fix importShadow: shadow of imported package

* fix nestingReduce: invert if cond, replace body with `continue`, move old body after the statement

* useless-break: useless break in case clause (revive)

* Remove the redundant content from the golangci.yaml file
pull/20830/head
Tian 2025-05-24 08:31:28 +08:00 committed by GitHub
parent f89d46d8a2
commit 3cf1e63e21
79 changed files with 546 additions and 517 deletions
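
As a quick reference before the per-file diffs, here is a minimal, self-contained Go sketch of the shape of a few of the quick fixes named in the commit message above. The identifiers (lintexamples, errSubnetTaken, writeHashLine, and so on) are illustrative stand-ins, not the actual minikube code touched by this commit:

```go
package lintexamples

import (
	"errors"
	"fmt"
	"io"
)

var (
	errSubnetTaken  = errors.New("subnet taken")
	errGatewayTaken = errors.New("gateway taken")
)

// QF1011: omit a redundant type from a declaration; it is inferred from the RHS.
// before: var logOutput *os.File = os.Stdout
// after:  logOutput := os.Stdout

// QF1001: apply De Morgan's law to drop the outer negation.
func isRetryable(err error) bool {
	// before: return !(errors.Is(err, errSubnetTaken) || errors.Is(err, errGatewayTaken))
	return !errors.Is(err, errSubnetTaken) && !errors.Is(err, errGatewayTaken)
}

// QF1003: collapse an if/else-if chain on a single value into a tagged switch.
func describeAction(action string) string {
	switch action {
	case "prompt", "":
		return "ask the user"
	case "enable":
		return "read values from the config file"
	case "disable":
		return "ignore the configs"
	default:
		return "invalid value"
	}
}

// QF1012: use fmt.Fprintf(w, ...) instead of w.WriteString(fmt.Sprintf(...)).
func writeHashLine(w io.Writer, sum [32]byte, name string) error {
	_, err := fmt.Fprintf(w, "sha256 %x %s.tar.gz\n", sum, name)
	return err
}

// weakCond: a nil check alone does not protect an index into a slice; check len instead.
func firstNodeName(names []string) (string, bool) {
	if len(names) > 0 { // rather than: names != nil && names[0] != ""
		return names[0], true
	}
	return "", false
}
```

The tagged-switch form is what gocritic and staticcheck suggest whenever every branch of an if/else-if chain compares the same variable against constants, which is exactly the pattern replaced in the registry-creds and GPU handling changes below.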


@ -23,23 +23,8 @@ linters:
- "-ST1020" - "-ST1020"
- "-ST1021" - "-ST1021"
- "-ST1022" - "-ST1022"
##### TODO: fix and enable these
# 4 occurrences.
# Use fmt.Fprintf(x, ...) instead of x.Write(fmt.Sprintf(...)) https://staticcheck.dev/docs/checks#QF1012
- "-QF1012"
# 3 occurrences.
# Apply De Morgans law https://staticcheck.dev/docs/checks#QF1001
- "-QF1001"
# 9 occurrences.
# Convert if/else-if chain to tagged switch https://staticcheck.dev/docs/checks#QF1003
- "-QF1003"
# 1 occurrence.
# could omit type *os.File from declaration; it will be inferred from the right-hand side
- "-QF1011"
##### These have been vetted to be disabled.
# 19 occurrences. Omit embedded fields from selector expression https://staticcheck.dev/docs/checks#QF1008
# Usefulness is questionable.
- "-QF1008" - "-QF1008"
revive: revive:
enable-all-rules: true enable-all-rules: true
rules: rules:
@ -150,23 +135,22 @@ linters:
# - yodaStyleExpr # - yodaStyleExpr
# - typeUnparen # - typeUnparen
##### TODO: fix and enable these
# We enabled these and we pass # We enabled these and we pass
- nilValReturn - nilValReturn
# - weakCond # pkg/minikube/config/profile.go:61:9: weakCond: suspicious `cc.Nodes != nil && cc.Nodes[0].Name == node.Name`; nil check may not be enough, check for len (gocritic) - weakCond
- indexAlloc - indexAlloc
- rangeExprCopy - rangeExprCopy
- boolExprSimplify - boolExprSimplify
- commentedOutImport - commentedOutImport
# - docStub # pkg/minikube/tunnel/kic/service_tunnel.go:51:1: docStub: silencing go lint doc-comment warnings is unadvised (gocritic) - docStub
- emptyFallthrough - emptyFallthrough
- hexLiteral - hexLiteral
- typeAssertChain - typeAssertChain
- unlabelStmt - unlabelStmt
# - builtinShadow # cmd/minikube/cmd/delete.go:89:7: builtinShadow: shadowing of predeclared identifier: error (gocritic) - builtinShadow
# - importShadow # pkg/storage/storage_provisioner.go:60:2: importShadow: shadow of imported package 'path' (gocritic) - importShadow
- initClause - initClause
# - nestingReduce # pkg/minikube/tunnel/registry.go:94:3: nestingReduce: invert if cond, replace body with `continue`, move old body after the statement (gocritic) - nestingReduce
- unnecessaryBlock - unnecessaryBlock
exclusions: exclusions:
@ -181,7 +165,3 @@ linters:
- path: '(.+)\.go$' - path: '(.+)\.go$'
text: "Error return value of `.*` is not checked" text: "Error return value of `.*` is not checked"
linter: errcheck linter: errcheck
# This code is doubtful and I don't understand it. Location: Line 456
- path: 'cmd/minikube/cmd/docker-env.go'
text: "useless-break: useless break in case clause"
linter: revive


@ -162,7 +162,7 @@ func GenerateBashCompletion(w io.Writer, cmd *cobra.Command) error {
} }
// GenerateZshCompletion generates the completion for the zsh shell // GenerateZshCompletion generates the completion for the zsh shell
func GenerateZshCompletion(out io.Writer, cmd *cobra.Command) error { func GenerateZshCompletion(w io.Writer, cmd *cobra.Command) error {
zshAutoloadTag := `#compdef minikube zshAutoloadTag := `#compdef minikube
` `
@ -300,17 +300,17 @@ __minikube_convert_bash_to_zsh() {
<<'BASH_COMPLETION_EOF' <<'BASH_COMPLETION_EOF'
` `
_, err := out.Write([]byte(zshAutoloadTag)) _, err := w.Write([]byte(zshAutoloadTag))
if err != nil { if err != nil {
return err return err
} }
_, err = out.Write([]byte(boilerPlate)) _, err = w.Write([]byte(boilerPlate))
if err != nil { if err != nil {
return err return err
} }
_, err = out.Write([]byte(zshInitialization)) _, err = w.Write([]byte(zshInitialization))
if err != nil { if err != nil {
return err return err
} }
@ -320,7 +320,7 @@ __minikube_convert_bash_to_zsh() {
if err != nil { if err != nil {
return errors.Wrap(err, "Error generating zsh completion") return errors.Wrap(err, "Error generating zsh completion")
} }
_, err = out.Write(buf.Bytes()) _, err = w.Write(buf.Bytes())
if err != nil { if err != nil {
return err return err
} }
@ -330,7 +330,7 @@ BASH_COMPLETION_EOF
} }
__minikube_bash_source <(__minikube_convert_bash_to_zsh) __minikube_bash_source <(__minikube_convert_bash_to_zsh)
` `
_, err = out.Write([]byte(zshTail)) _, err = w.Write([]byte(zshTail))
if err != nil { if err != nil {
return err return err
} }


@ -133,7 +133,7 @@ func loadAddonConfigFile(addon, configFilePath string) (ac *addonConfig) {
type configFile struct { type configFile struct {
Addons addonConfig `json:"addons"` Addons addonConfig `json:"addons"`
} }
var config configFile var cf configFile
if configFilePath != "" { if configFilePath != "" {
out.Ln("Reading %s configs from %s", addon, configFilePath) out.Ln("Reading %s configs from %s", addon, configFilePath)
@ -150,14 +150,14 @@ func loadAddonConfigFile(addon, configFilePath string) (ac *addonConfig) {
fmt.Sprintf("error opening config file: %s", configFilePath)) fmt.Sprintf("error opening config file: %s", configFilePath))
} }
if err = json.Unmarshal(confData, &config); err != nil { if err = json.Unmarshal(confData, &cf); err != nil {
// err = errors2.Wrapf(err, "error reading config file (%s)", configFilePath) // err = errors2.Wrapf(err, "error reading config file (%s)", configFilePath)
klog.Errorf("error reading config file (%s): %v", configFilePath, err) klog.Errorf("error reading config file (%s): %v", configFilePath, err)
exit.Message(reason.Kind{ExitCode: reason.ExProgramConfig, Advice: "provide a valid config file"}, exit.Message(reason.Kind{ExitCode: reason.ExProgramConfig, Advice: "provide a valid config file"},
fmt.Sprintf("error reading config file: %v", err)) fmt.Sprintf("error reading config file: %v", err))
} }
return &config.Addons return &cf.Addons
} }
return nil return nil
} }


@ -92,7 +92,9 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) {
regCredsConf := &ac.RegistryCreds regCredsConf := &ac.RegistryCreds
awsEcrAction := regCredsConf.EnableAWSEcr // regCredsConf. "enableAWSEcr") awsEcrAction := regCredsConf.EnableAWSEcr // regCredsConf. "enableAWSEcr")
if awsEcrAction == "prompt" || awsEcrAction == "" {
switch awsEcrAction {
case "prompt", "":
enableAWSECR := AskForYesNoConfirmation("\nDo you want to enable AWS Elastic Container Registry?", posResponses, negResponses) enableAWSECR := AskForYesNoConfirmation("\nDo you want to enable AWS Elastic Container Registry?", posResponses, negResponses)
if enableAWSECR { if enableAWSECR {
awsAccessID = AskForStaticValue("-- Enter AWS Access Key ID: ") awsAccessID = AskForStaticValue("-- Enter AWS Access Key ID: ")
@ -102,7 +104,7 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) {
awsAccount = AskForStaticValue("-- Enter 12 digit AWS Account ID (Comma separated list): ") awsAccount = AskForStaticValue("-- Enter 12 digit AWS Account ID (Comma separated list): ")
awsRole = AskForStaticValueOptional("-- (Optional) Enter ARN of AWS role to assume: ") awsRole = AskForStaticValueOptional("-- (Optional) Enter ARN of AWS role to assume: ")
} }
} else if awsEcrAction == "enable" { case "enable":
out.Ln("Loading AWS ECR configs from: %s", addonConfigFile) out.Ln("Loading AWS ECR configs from: %s", addonConfigFile)
// Then read the configs // Then read the configs
awsAccessID = regCredsConf.EcrConfigs.AccessID awsAccessID = regCredsConf.EcrConfigs.AccessID
@ -111,15 +113,17 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) {
awsRegion = regCredsConf.EcrConfigs.Region awsRegion = regCredsConf.EcrConfigs.Region
awsAccount = regCredsConf.EcrConfigs.Account awsAccount = regCredsConf.EcrConfigs.Account
awsRole = regCredsConf.EcrConfigs.Role awsRole = regCredsConf.EcrConfigs.Role
} else if awsEcrAction == "disable" { case "disable":
out.Ln("Ignoring AWS ECR configs") out.Ln("Ignoring AWS ECR configs")
} else { default:
out.Ln("Disabling AWS ECR. Invalid value for enableAWSEcr (%s). Must be one of 'disable', 'enable' or 'prompt'", awsEcrAction) out.Ln("Disabling AWS ECR. Invalid value for enableAWSEcr (%s). Must be one of 'disable', 'enable' or 'prompt'", awsEcrAction)
} }
gcrPath := "" gcrPath := ""
gcrAction := regCredsConf.EnableGCR gcrAction := regCredsConf.EnableGCR
if gcrAction == "prompt" || gcrAction == "" {
switch gcrAction {
case "prompt", "":
enableGCR := AskForYesNoConfirmation("\nDo you want to enable Google Container Registry?", posResponses, negResponses) enableGCR := AskForYesNoConfirmation("\nDo you want to enable Google Container Registry?", posResponses, negResponses)
if enableGCR { if enableGCR {
gcrPath = AskForStaticValue("-- Enter path to credentials (e.g. /home/user/.config/gcloud/application_default_credentials.json):") gcrPath = AskForStaticValue("-- Enter path to credentials (e.g. /home/user/.config/gcloud/application_default_credentials.json):")
@ -129,14 +133,14 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) {
gcrURL = AskForStaticValue("-- Enter GCR URL (e.g. https://asia.gcr.io):") gcrURL = AskForStaticValue("-- Enter GCR URL (e.g. https://asia.gcr.io):")
} }
} }
} else if gcrAction == "enable" { case "enable":
out.Ln("Loading GCR configs from: %s", addonConfigFile) out.Ln("Loading GCR configs from: %s", addonConfigFile)
// Then read the configs // Then read the configs
gcrPath = regCredsConf.GcrConfigs.GcrPath gcrPath = regCredsConf.GcrConfigs.GcrPath
gcrURL = regCredsConf.GcrConfigs.GcrURL gcrURL = regCredsConf.GcrConfigs.GcrURL
} else if gcrAction == "disable" { case "disable":
out.Ln("Ignoring GCR configs") out.Ln("Ignoring GCR configs")
} else { default:
out.Ln("Disabling GCR. Invalid value for enableGCR (%s). Must be one of 'disable', 'enable' or 'prompt'", gcrAction) out.Ln("Disabling GCR. Invalid value for enableGCR (%s). Must be one of 'disable', 'enable' or 'prompt'", gcrAction)
} }
@ -152,40 +156,44 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) {
} }
dockerRegistryAction := regCredsConf.EnableDockerRegistry dockerRegistryAction := regCredsConf.EnableDockerRegistry
if dockerRegistryAction == "prompt" || dockerRegistryAction == "" {
switch dockerRegistryAction {
case "prompt", "":
enableDR := AskForYesNoConfirmation("\nDo you want to enable Docker Registry?", posResponses, negResponses) enableDR := AskForYesNoConfirmation("\nDo you want to enable Docker Registry?", posResponses, negResponses)
if enableDR { if enableDR {
dockerServer = AskForStaticValue("-- Enter docker registry server url: ") dockerServer = AskForStaticValue("-- Enter docker registry server url: ")
dockerUser = AskForStaticValue("-- Enter docker registry username: ") dockerUser = AskForStaticValue("-- Enter docker registry username: ")
dockerPass = AskForPasswordValue("-- Enter docker registry password: ") dockerPass = AskForPasswordValue("-- Enter docker registry password: ")
} }
} else if dockerRegistryAction == "enable" { case "enable":
out.Ln("Loading Docker Registry configs from: %s", addonConfigFile) out.Ln("Loading Docker Registry configs from: %s", addonConfigFile)
dockerServer = regCredsConf.DockerConfigs.DockerServer dockerServer = regCredsConf.DockerConfigs.DockerServer
dockerUser = regCredsConf.DockerConfigs.DockerUser dockerUser = regCredsConf.DockerConfigs.DockerUser
dockerPass = regCredsConf.DockerConfigs.DockerPass dockerPass = regCredsConf.DockerConfigs.DockerPass
} else if dockerRegistryAction == "disable" { case "disable":
out.Ln("Ignoring Docker Registry configs") out.Ln("Ignoring Docker Registry configs")
} else { default:
out.Ln("Disabling Docker Registry. Invalid value for enableDockerRegistry (%s). Must be one of 'disable', 'enable' or 'prompt'", dockerRegistryAction) out.Ln("Disabling Docker Registry. Invalid value for enableDockerRegistry (%s). Must be one of 'disable', 'enable' or 'prompt'", dockerRegistryAction)
} }
acrAction := regCredsConf.EnableACR acrAction := regCredsConf.EnableACR
if acrAction == "prompt" || acrAction == "" {
switch acrAction {
case "prompt", "":
enableACR := AskForYesNoConfirmation("\nDo you want to enable Azure Container Registry?", posResponses, negResponses) enableACR := AskForYesNoConfirmation("\nDo you want to enable Azure Container Registry?", posResponses, negResponses)
if enableACR { if enableACR {
acrURL = AskForStaticValue("-- Enter Azure Container Registry (ACR) URL: ") acrURL = AskForStaticValue("-- Enter Azure Container Registry (ACR) URL: ")
acrClientID = AskForStaticValue("-- Enter client ID (service principal ID) to access ACR: ") acrClientID = AskForStaticValue("-- Enter client ID (service principal ID) to access ACR: ")
acrPassword = AskForPasswordValue("-- Enter service principal password to access Azure Container Registry: ") acrPassword = AskForPasswordValue("-- Enter service principal password to access Azure Container Registry: ")
} }
} else if acrAction == "enable" { case "enable":
out.Ln("Loading ACR configs from: ", addonConfigFile) out.Ln("Loading ACR configs from: ", addonConfigFile)
acrURL = regCredsConf.AcrConfigs.AcrURL acrURL = regCredsConf.AcrConfigs.AcrURL
acrClientID = regCredsConf.AcrConfigs.AcrClientID acrClientID = regCredsConf.AcrConfigs.AcrClientID
acrPassword = regCredsConf.AcrConfigs.AcrPassword acrPassword = regCredsConf.AcrConfigs.AcrPassword
} else if acrAction == "disable" { case "disable":
out.Ln("Ignoring ACR configs") out.Ln("Ignoring ACR configs")
} else { default:
out.Stringf("Disabling ACR. Invalid value for enableACR (%s). Must be one of 'disable', 'enable' or 'prompt'", acrAction) out.Stringf("Disabling ACR. Invalid value for enableACR (%s). Must be one of 'disable', 'enable' or 'prompt'", acrAction)
} }


@ -157,7 +157,7 @@ func kubectlProxy(kubectlVersion string, binaryURL string, contextName string, p
klog.Infof("Waiting for kubectl to output host:port ...") klog.Infof("Waiting for kubectl to output host:port ...")
reader := bufio.NewReader(stdoutPipe) reader := bufio.NewReader(stdoutPipe)
var out []byte var outData []byte
for { for {
r, timedOut, err := readByteWithTimeout(reader, 5*time.Second) r, timedOut, err := readByteWithTimeout(reader, 5*time.Second)
if err != nil { if err != nil {
@ -170,10 +170,10 @@ func kubectlProxy(kubectlVersion string, binaryURL string, contextName string, p
klog.Infof("timed out waiting for input: possibly due to an old kubectl version.") klog.Infof("timed out waiting for input: possibly due to an old kubectl version.")
break break
} }
out = append(out, r) outData = append(outData, r)
} }
klog.Infof("proxy stdout: %s", string(out)) klog.Infof("proxy stdout: %s", string(outData))
return cmd, hostPortRe.FindString(string(out)), nil return cmd, hostPortRe.FindString(string(outData)), nil
} }
// readByteWithTimeout returns a byte from a reader or an indicator that a timeout has occurred. // readByteWithTimeout returns a byte from a reader or an indicator that a timeout has occurred.
@ -203,9 +203,9 @@ func readByteWithTimeout(r io.ByteReader, timeout time.Duration) (byte, bool, er
} }
// dashboardURL generates a URL for accessing the dashboard service // dashboardURL generates a URL for accessing the dashboard service
func dashboardURL(proxy string, ns string, svc string) string { func dashboardURL(addr string, ns string, svc string) string {
// Reference: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above // Reference: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above
return fmt.Sprintf("http://%s/api/v1/namespaces/%s/services/http:%s:/proxy/", proxy, ns, svc) return fmt.Sprintf("http://%s/api/v1/namespaces/%s/services/http:%s:/proxy/", addr, ns, svc)
} }
// checkURL checks if a URL returns 200 HTTP OK // checkURL checks if a URL returns 200 HTTP OK


@ -86,8 +86,8 @@ type DeletionError struct {
Errtype typeOfError Errtype typeOfError
} }
func (error DeletionError) Error() string { func (deletionError DeletionError) Error() string {
return error.Err.Error() return deletionError.Err.Error()
} }
var hostAndDirsDeleter = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error { var hostAndDirsDeleter = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error {
@ -527,11 +527,11 @@ func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.N
} }
// HandleDeletionErrors handles deletion errors from DeleteProfiles // HandleDeletionErrors handles deletion errors from DeleteProfiles
func HandleDeletionErrors(errors []error) { func HandleDeletionErrors(errs []error) {
if len(errors) == 1 { if len(errs) == 1 {
handleSingleDeletionError(errors[0]) handleSingleDeletionError(errs[0])
} else { } else {
handleMultipleDeletionErrors(errors) handleMultipleDeletionErrors(errs)
} }
} }
@ -556,10 +556,10 @@ func handleSingleDeletionError(err error) {
} }
} }
func handleMultipleDeletionErrors(errors []error) { func handleMultipleDeletionErrors(errs []error) {
out.ErrT(style.Sad, "Multiple errors deleting profiles") out.ErrT(style.Sad, "Multiple errors deleting profiles")
for _, err := range errors { for _, err := range errs {
deletionError, ok := err.(DeletionError) deletionError, ok := err.(DeletionError)
if ok { if ok {
@ -706,14 +706,14 @@ var isMinikubeProcess = func(pid int) (bool, error) {
// getPids opens the file at PATH and tries to read // getPids opens the file at PATH and tries to read
// one or more space separated pids // one or more space separated pids
func getPids(path string) ([]int, error) { func getPids(path string) ([]int, error) {
out, err := os.ReadFile(path) data, err := os.ReadFile(path)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "ReadFile") return nil, errors.Wrap(err, "ReadFile")
} }
klog.Infof("pidfile contents: %s", out) klog.Infof("pidfile contents: %s", data)
pids := []int{} pids := []int{}
strPids := strings.Fields(string(out)) strPids := strings.Fields(string(data))
for _, p := range strPids { for _, p := range strPids {
intPid, err := strconv.Atoi(p) intPid, err := strconv.Atoi(p)
if err != nil { if err != nil {


@ -462,7 +462,6 @@ func dockerSetScript(ec DockerEnvConfig, w io.Writer) error {
switch outputFormat { switch outputFormat {
case "": case "":
// shell "none" // shell "none"
break
case "text": case "text":
for k, v := range envVars { for k, v := range envVars {
_, err := fmt.Fprintf(w, "%s=%s\n", k, v) _, err := fmt.Fprintf(w, "%s=%s\n", k, v)
@ -472,11 +471,11 @@ func dockerSetScript(ec DockerEnvConfig, w io.Writer) error {
} }
return nil return nil
case "json": case "json":
json, err := json.Marshal(envVars) jsondata, err := json.Marshal(envVars)
if err != nil { if err != nil {
return err return err
} }
_, err = w.Write(json) _, err = w.Write(jsondata)
if err != nil { if err != nil {
return err return err
} }
@ -486,11 +485,11 @@ func dockerSetScript(ec DockerEnvConfig, w io.Writer) error {
} }
return nil return nil
case "yaml": case "yaml":
yaml, err := yaml.Marshal(envVars) yamldata, err := yaml.Marshal(envVars)
if err != nil { if err != nil {
return err return err
} }
_, err = w.Write(yaml) _, err = w.Write(yamldata)
if err != nil { if err != nil {
return err return err
} }
@ -509,7 +508,6 @@ func dockerUnsetScript(ec DockerEnvConfig, w io.Writer) error {
switch outputFormat { switch outputFormat {
case "": case "":
// shell "none" // shell "none"
break
case "text": case "text":
for _, n := range vars { for _, n := range vars {
_, err := fmt.Fprintf(w, "%s\n", n) _, err := fmt.Fprintf(w, "%s\n", n)
@ -519,11 +517,11 @@ func dockerUnsetScript(ec DockerEnvConfig, w io.Writer) error {
} }
return nil return nil
case "json": case "json":
json, err := json.Marshal(vars) jsondata, err := json.Marshal(vars)
if err != nil { if err != nil {
return err return err
} }
_, err = w.Write(json) _, err = w.Write(jsondata)
if err != nil { if err != nil {
return err return err
} }
@ -533,11 +531,11 @@ func dockerUnsetScript(ec DockerEnvConfig, w io.Writer) error {
} }
return nil return nil
case "yaml": case "yaml":
yaml, err := yaml.Marshal(vars) yamldata, err := yaml.Marshal(vars)
if err != nil { if err != nil {
return err return err
} }
_, err = w.Write(yaml) _, err = w.Write(yamldata)
if err != nil { if err != nil {
return err return err
} }


@ -157,12 +157,12 @@ func KubectlCommand(version, binaryURL string, args ...string) (*exec.Cmd, error
version = constants.DefaultKubernetesVersion version = constants.DefaultKubernetesVersion
} }
path, err := node.CacheKubectlBinary(version, binaryURL) binary, err := node.CacheKubectlBinary(version, binaryURL)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return exec.Command(path, args...), nil return exec.Command(binary, args...), nil
} }
func init() { func init() {


@ -63,7 +63,7 @@ var logsCmd = &cobra.Command{
Short: "Returns logs to debug a local Kubernetes cluster", Short: "Returns logs to debug a local Kubernetes cluster",
Long: `Gets the logs of the running instance, used for debugging minikube, not user code.`, Long: `Gets the logs of the running instance, used for debugging minikube, not user code.`,
Run: func(_ *cobra.Command, _ []string) { Run: func(_ *cobra.Command, _ []string) {
var logOutput *os.File = os.Stdout logOutput := os.Stdout
var err error var err error
if fileOutput != "" { if fileOutput != "" {


@ -313,7 +313,7 @@ func removePid(path string, pid string) error {
// we found the correct file // we found the correct file
// we're reading the pids... // we're reading the pids...
out, err := os.ReadFile(pidPath) data, err := os.ReadFile(pidPath)
if err != nil { if err != nil {
return errors.Wrap(err, "readFile") return errors.Wrap(err, "readFile")
} }
@ -321,7 +321,7 @@ func removePid(path string, pid string) error {
pids := []string{} pids := []string{}
// we're splitting the mount-pids file content into a slice of strings // we're splitting the mount-pids file content into a slice of strings
// so that we can compare each to the PID we're looking for // so that we can compare each to the PID we're looking for
strPids := strings.Fields(string(out)) strPids := strings.Fields(string(data))
for _, p := range strPids { for _, p := range strPids {
// If we find the PID, we don't add it to the slice // If we find the PID, we don't add it to the slice
if p == pid { if p == pid {


@ -253,10 +253,10 @@ func podmanUnsetScript(ec PodmanEnvConfig, w io.Writer) error {
// podmanBridge returns the command to use in a var for accessing the podman varlink bridge over ssh // podmanBridge returns the command to use in a var for accessing the podman varlink bridge over ssh
func podmanBridge(client *ssh.ExternalClient) string { func podmanBridge(client *ssh.ExternalClient) string {
command := []string{client.BinaryPath} cmd := []string{client.BinaryPath}
command = append(command, client.BaseArgs...) cmd = append(cmd, client.BaseArgs...)
command = append(command, "--", "sudo", "varlink", "-A", `\'podman varlink \\\$VARLINK_ADDRESS\'`, "bridge") cmd = append(cmd, "--", "sudo", "varlink", "-A", `\'podman varlink \\\$VARLINK_ADDRESS\'`, "bridge")
return strings.Join(command, " ") return strings.Join(cmd, " ")
} }
// podmanURL returns the url to use in a var for accessing the podman socket over ssh // podmanURL returns the url to use in a var for accessing the podman socket over ssh


@ -282,7 +282,7 @@ func runStart(cmd *cobra.Command, _ []string) {
} }
} }
kubeconfig, err := startWithDriver(cmd, starter, existing) configInfo, err := startWithDriver(cmd, starter, existing)
if err != nil { if err != nil {
node.ExitIfFatal(err, useForce) node.ExitIfFatal(err, useForce)
exit.Error(reason.GuestStart, "failed to start node", err) exit.Error(reason.GuestStart, "failed to start node", err)
@ -294,7 +294,7 @@ func runStart(cmd *cobra.Command, _ []string) {
} }
} }
if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Node.ContainerRuntime, starter.Cfg.Name); err != nil { if err := showKubectlInfo(configInfo, starter.Node.KubernetesVersion, starter.Node.ContainerRuntime, starter.Cfg.Name); err != nil {
klog.Errorf("kubectl info: %v", err) klog.Errorf("kubectl info: %v", err)
} }
} }
@ -363,11 +363,11 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *
} }
if driver.IsVM(driverName) && !driver.IsSSH(driverName) { if driver.IsVM(driverName) && !driver.IsSSH(driverName) {
url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL)) urlString, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL))
if err != nil { if err != nil {
return node.Starter{}, errors.Wrap(err, "Failed to cache ISO") return node.Starter{}, errors.Wrap(err, "Failed to cache ISO")
} }
cc.MinikubeISO = url cc.MinikubeISO = urlString
} }
var existingAddons map[string]bool var existingAddons map[string]bool
@ -462,9 +462,9 @@ func imageMatchesBinaryVersion(imageVersion, binaryVersion string) bool {
func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) { func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) {
// start primary control-plane node // start primary control-plane node
kubeconfig, err := node.Start(starter) configInfo, err := node.Start(starter)
if err != nil { if err != nil {
kubeconfig, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err) configInfo, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -512,7 +512,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.
pause.RemovePausedFile(starter.Runner) pause.RemovePausedFile(starter.Runner)
return kubeconfig, nil return configInfo, nil
} }
func warnAboutMultiNodeCNI() { func warnAboutMultiNodeCNI() {
@ -528,14 +528,14 @@ func updateDriver(driverName string) {
} }
} }
func displayVersion(version string) { func displayVersion(ver string) {
prefix := "" prefix := ""
if ClusterFlagValue() != constants.DefaultClusterName { if ClusterFlagValue() != constants.DefaultClusterName {
prefix = fmt.Sprintf("[%s] ", ClusterFlagValue()) prefix = fmt.Sprintf("[%s] ", ClusterFlagValue())
} }
register.Reg.SetStep(register.InitialSetup) register.Reg.SetStep(register.InitialSetup)
out.Step(style.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()}) out.Step(style.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": ver, "platform": platform()})
} }
// displayEnviron makes the user aware of environment variables that will affect how minikube operates // displayEnviron makes the user aware of environment variables that will affect how minikube operates
@ -631,7 +631,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co
// Re-generate the cluster config, just in case the failure was related to an old config format // Re-generate the cluster config, just in case the failure was related to an old config format
cc := updateExistingConfigFromFlags(cmd, &existing) cc := updateExistingConfigFromFlags(cmd, &existing)
var kubeconfig *kubeconfig.Settings var configInfo *kubeconfig.Settings
for _, n := range cc.Nodes { for _, n := range cc.Nodes {
r, p, m, h, err := node.Provision(&cc, &n, false) r, p, m, h, err := node.Provision(&cc, &n, false)
s := node.Starter{ s := node.Starter{
@ -650,14 +650,14 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co
k, err := node.Start(s) k, err := node.Start(s)
if n.ControlPlane { if n.ControlPlane {
kubeconfig = k configInfo = k
} }
if err != nil { if err != nil {
// Ok we failed again, let's bail // Ok we failed again, let's bail
return nil, err return nil, err
} }
} }
return kubeconfig, nil return configInfo, nil
} }
// Don't delete the cluster unless they ask // Don't delete the cluster unless they ask
return nil, originalErr return nil, originalErr
@ -902,12 +902,12 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) {
// validateDriver validates that the selected driver appears sane, exits if not // validateDriver validates that the selected driver appears sane, exits if not
func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) { func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
name := ds.Name driverName := ds.Name
os := detect.RuntimeOS() osName := detect.RuntimeOS()
arch := detect.RuntimeArch() arch := detect.RuntimeArch()
klog.Infof("validating driver %q against %+v", name, existing) klog.Infof("validating driver %q against %+v", driverName, existing)
if !driver.Supported(name) { if !driver.Supported(driverName) {
exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": name, "os": os, "arch": arch}) exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": driverName, "os": osName, "arch": arch})
} }
// if we are only downloading artifacts for a driver, we can stop validation here // if we are only downloading artifacts for a driver, we can stop validation here
@ -916,7 +916,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
} }
st := ds.State st := ds.State
klog.Infof("status for %s: %+v", name, st) klog.Infof("status for %s: %+v", driverName, st)
if st.NeedsImprovement { if st.NeedsImprovement {
out.Styled(style.Improvement, `For improved {{.driver}} performance, {{.fix}}`, out.V{"driver": driver.FullName(ds.Name), "fix": translate.T(st.Fix)}) out.Styled(style.Improvement, `For improved {{.driver}} performance, {{.fix}}`, out.V{"driver": driver.FullName(ds.Name), "fix": translate.T(st.Fix)})
@ -924,7 +924,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
if ds.Priority == registry.Obsolete { if ds.Priority == registry.Obsolete {
exit.Message(reason.Kind{ exit.Message(reason.Kind{
ID: fmt.Sprintf("PROVIDER_%s_OBSOLETE", strings.ToUpper(name)), ID: fmt.Sprintf("PROVIDER_%s_OBSOLETE", strings.ToUpper(driverName)),
Advice: translate.T(st.Fix), Advice: translate.T(st.Fix),
ExitCode: reason.ExProviderUnsupported, ExitCode: reason.ExProviderUnsupported,
URL: st.Doc, URL: st.Doc,
@ -943,23 +943,23 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
if !st.Installed { if !st.Installed {
exit.Message(reason.Kind{ exit.Message(reason.Kind{
ID: fmt.Sprintf("PROVIDER_%s_NOT_FOUND", strings.ToUpper(name)), ID: fmt.Sprintf("PROVIDER_%s_NOT_FOUND", strings.ToUpper(driverName)),
Advice: translate.T(st.Fix), Advice: translate.T(st.Fix),
ExitCode: reason.ExProviderNotFound, ExitCode: reason.ExProviderNotFound,
URL: st.Doc, URL: st.Doc,
Style: style.Shrug, Style: style.Shrug,
}, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": name, "error": st.Error}) }, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": driverName, "error": st.Error})
} }
id := st.Reason id := st.Reason
if id == "" { if id == "" {
id = fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name)) id = fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(driverName))
} }
code := reason.ExProviderUnavailable code := reason.ExProviderUnavailable
if !st.Running { if !st.Running {
id = fmt.Sprintf("PROVIDER_%s_NOT_RUNNING", strings.ToUpper(name)) id = fmt.Sprintf("PROVIDER_%s_NOT_RUNNING", strings.ToUpper(driverName))
code = reason.ExProviderNotRunning code = reason.ExProviderNotRunning
} }
@ -1515,15 +1515,15 @@ func defaultRuntime() string {
} }
// if container runtime is not docker, check that cni is not disabled // if container runtime is not docker, check that cni is not disabled
func validateCNI(cmd *cobra.Command, runtime string) { func validateCNI(cmd *cobra.Command, runtimeName string) {
if runtime == constants.Docker { if runtimeName == constants.Docker {
return return
} }
if cmd.Flags().Changed(cniFlag) && strings.ToLower(viper.GetString(cniFlag)) == "false" { if cmd.Flags().Changed(cniFlag) && strings.ToLower(viper.GetString(cniFlag)) == "false" {
if viper.GetBool(force) { if viper.GetBool(force) {
out.WarnReason(reason.Usage, "You have chosen to disable the CNI but the \"{{.name}}\" container runtime requires CNI", out.V{"name": runtime}) out.WarnReason(reason.Usage, "You have chosen to disable the CNI but the \"{{.name}}\" container runtime requires CNI", out.V{"name": runtimeName})
} else { } else {
exit.Message(reason.Usage, "The \"{{.name}}\" container runtime requires CNI", out.V{"name": runtime}) exit.Message(reason.Usage, "The \"{{.name}}\" container runtime requires CNI", out.V{"name": runtimeName})
} }
} }
} }
@ -2004,16 +2004,16 @@ func validateBareMetal(drvName string) {
if err != nil { if err != nil {
klog.Warningf("failed getting Kubernetes version: %v", err) klog.Warningf("failed getting Kubernetes version: %v", err)
} }
version, _ := util.ParseKubernetesVersion(kubeVer) ver, _ := util.ParseKubernetesVersion(kubeVer)
if version.GTE(semver.MustParse("1.18.0-beta.1")) { if ver.GTE(semver.MustParse("1.18.0-beta.1")) {
if _, err := exec.LookPath("conntrack"); err != nil { if _, err := exec.LookPath("conntrack"); err != nil {
exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()}) exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": ver.String()})
} }
} }
// crictl is required starting with Kubernetes 1.24, for all runtimes since the removal of dockershim // crictl is required starting with Kubernetes 1.24, for all runtimes since the removal of dockershim
if version.GTE(semver.MustParse("1.24.0-alpha.0")) { if ver.GTE(semver.MustParse("1.24.0-alpha.0")) {
if _, err := exec.LookPath("crictl"); err != nil { if _, err := exec.LookPath("crictl"); err != nil {
exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires crictl to be installed in root's path", out.V{"k8sVersion": version.String()}) exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires crictl to be installed in root's path", out.V{"k8sVersion": ver.String()})
} }
} }
} }
@ -2062,24 +2062,24 @@ func startNerdctld() {
runner := co.CP.Runner runner := co.CP.Runner
// and set 777 to these files // and set 777 to these files
if out, err := runner.RunCmd(exec.Command("sudo", "chmod", "777", "/usr/local/bin/nerdctl", "/usr/local/bin/nerdctld")); err != nil { if rest, err := runner.RunCmd(exec.Command("sudo", "chmod", "777", "/usr/local/bin/nerdctl", "/usr/local/bin/nerdctld")); err != nil {
exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed setting permission for nerdctl: %s", out.Output()), err) exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed setting permission for nerdctl: %s", rest.Output()), err)
} }
// sudo systemctl start nerdctld.socket // sudo systemctl start nerdctld.socket
if out, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.socket")); err != nil { if rest, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.socket")); err != nil {
exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.socket: %s", out.Output()), err) exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.socket: %s", rest.Output()), err)
} }
// sudo systemctl start nerdctld.service // sudo systemctl start nerdctld.service
if out, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.service")); err != nil { if rest, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.service")); err != nil {
exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.service: %s", out.Output()), err) exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.service: %s", rest.Output()), err)
} }
// set up environment variable on remote machine. docker client uses 'non-login & non-interactive shell' therefore the only way is to modify .bashrc file of user 'docker' // set up environment variable on remote machine. docker client uses 'non-login & non-interactive shell' therefore the only way is to modify .bashrc file of user 'docker'
// insert this at 4th line // insert this at 4th line
envSetupCommand := exec.Command("/bin/bash", "-c", "sed -i '4i export DOCKER_HOST=unix:///run/nerdctld.sock' .bashrc") envSetupCommand := exec.Command("/bin/bash", "-c", "sed -i '4i export DOCKER_HOST=unix:///run/nerdctld.sock' .bashrc")
if out, err := runner.RunCmd(envSetupCommand); err != nil { if rest, err := runner.RunCmd(envSetupCommand); err != nil {
exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to set up DOCKER_HOST: %s", out.Output()), err) exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to set up DOCKER_HOST: %s", rest.Output()), err)
} }
} }


@ -156,13 +156,13 @@ func checkLogFileMaxSize(file string, maxSizeKB int64) bool {
// logFileName generates a default logfile name in the form minikube_<argv[1]>_<hash>_<count>.log from args // logFileName generates a default logfile name in the form minikube_<argv[1]>_<hash>_<count>.log from args
func logFileName(dir string, logIdx int64) string { func logFileName(dir string, logIdx int64) string {
h := sha1.New() h := sha1.New()
user, err := user.Current() userInfo, err := user.Current()
if err != nil { if err != nil {
klog.Warningf("Unable to get username to add to log filename hash: %v", err) klog.Warningf("Unable to get username to add to log filename hash: %v", err)
} else { } else {
_, err := h.Write([]byte(user.Username)) _, err := h.Write([]byte(userInfo.Username))
if err != nil { if err != nil {
klog.Warningf("Unable to add username %s to log filename hash: %v", user.Username, err) klog.Warningf("Unable to add username %s to log filename hash: %v", userInfo.Username, err)
} }
} }
for _, s := range pflag.Args() { for _, s := range pflag.Args() {


@ -63,11 +63,13 @@ func execute() error {
p := plot.New() p := plot.New()
// Set view options // Set view options
if runtime.GOOS == "darwin" { switch runtime.GOOS {
case "darwin":
p.Title.Text = "CPU% Busy Overhead - With Auto Pause vs. Non Auto Pause (less is better)" p.Title.Text = "CPU% Busy Overhead - With Auto Pause vs. Non Auto Pause (less is better)"
} else if runtime.GOOS == "linux" { case "linux":
p.Title.Text = "CPU% Busy Overhead - With Auto Pause vs. Non Auto Pause (less is better)" p.Title.Text = "CPU% Busy Overhead - With Auto Pause vs. Non Auto Pause (less is better)"
} }
p.Y.Label.Text = "CPU overhead%" p.Y.Label.Text = "CPU overhead%"
// Open non-autopause csv file of benchmark summary // Open non-autopause csv file of benchmark summary
@ -158,9 +160,10 @@ func execute() error {
p.Legend.Top = true p.Legend.Top = true
// Add x-lay names // Add x-lay names
if runtime.GOOS == "darwin" { switch runtime.GOOS {
case "darwin":
p.NominalX("OS idle", "minikube hyperkit", "minikube virtualbox", "minikube docker", "Docker for Mac Kubernetes", "k3d", "kind") p.NominalX("OS idle", "minikube hyperkit", "minikube virtualbox", "minikube docker", "Docker for Mac Kubernetes", "k3d", "kind")
} else if runtime.GOOS == "linux" { case "linux":
p.NominalX("OS idle", "minikube kvm2", "minikube virtualbox", "minikube docker", "Docker idle", "k3d", "kind") p.NominalX("OS idle", "minikube kvm2", "minikube virtualbox", "minikube docker", "Docker idle", "k3d", "kind")
} }
@ -223,16 +226,18 @@ func execute() error {
p.Add(napl, apl) p.Add(napl, apl)
// Output bar graph // Output bar graph
if runtime.GOOS == "darwin" { switch runtime.GOOS {
case "darwin":
if err := p.Save(13*vg.Inch, 8*vg.Inch, FOLDER+"/mac.png"); err != nil { if err := p.Save(13*vg.Inch, 8*vg.Inch, FOLDER+"/mac.png"); err != nil {
return errors.Wrap(err, "Failed to create bar graph png") return errors.Wrap(err, "Failed to create bar graph png")
} }
log.Printf("Generated graph png to %s/mac.png", FOLDER) log.Printf("Generated graph png to %s/mac.png", FOLDER)
} else if runtime.GOOS == "linux" { case "linux":
if err := p.Save(13*vg.Inch, 10*vg.Inch, FOLDER+"/linux.png"); err != nil { if err := p.Save(13*vg.Inch, 10*vg.Inch, FOLDER+"/linux.png"); err != nil {
return errors.Wrap(err, "Failed to create bar graph png") return errors.Wrap(err, "Failed to create bar graph png")
} }
log.Printf("Generated graph png to %s/linux.png", FOLDER) log.Printf("Generated graph png to %s/linux.png", FOLDER)
} }
return nil return nil
} }


@ -62,11 +62,13 @@ func execute() error {
p := plot.New() p := plot.New()
// Set view options // Set view options
if runtime.GOOS == "darwin" { switch runtime.GOOS {
case "darwin":
p.Title.Text = "CPU% Busy Overhead - Average first 5 minutes on macOS (less is better)" p.Title.Text = "CPU% Busy Overhead - Average first 5 minutes on macOS (less is better)"
} else if runtime.GOOS == "linux" { case "linux":
p.Title.Text = "CPU% Busy Overhead - Average first 5 minutes on Linux (less is better)" p.Title.Text = "CPU% Busy Overhead - Average first 5 minutes on Linux (less is better)"
} }
p.Y.Label.Text = "CPU overhead%" p.Y.Label.Text = "CPU overhead%"
// Open csv file of benchmark summary // Open csv file of benchmark summary
@ -114,9 +116,10 @@ func execute() error {
p.Legend.Top = true p.Legend.Top = true
// Add x-lay names // Add x-lay names
if runtime.GOOS == "darwin" { switch runtime.GOOS {
case "darwin":
p.NominalX("OS idle", "minikube hyperkit", "minikube virtualbox", "minikube docker", "Docker for Mac Kubernetes", "k3d", "kind") p.NominalX("OS idle", "minikube hyperkit", "minikube virtualbox", "minikube docker", "Docker for Mac Kubernetes", "k3d", "kind")
} else if runtime.GOOS == "linux" { case "linux":
p.NominalX("OS idle", "minikube kvm2", "minikube virtualbox", "minikube docker", "Docker idle", "k3d", "kind") p.NominalX("OS idle", "minikube kvm2", "minikube virtualbox", "minikube docker", "Docker idle", "k3d", "kind")
} }
@ -151,16 +154,18 @@ func execute() error {
p.Add(cl) p.Add(cl)
// Output bar graph // Output bar graph
if runtime.GOOS == "darwin" { switch runtime.GOOS {
case "darwin":
if err := p.Save(13*vg.Inch, 8*vg.Inch, FOLDER+"/mac.png"); err != nil { if err := p.Save(13*vg.Inch, 8*vg.Inch, FOLDER+"/mac.png"); err != nil {
return errors.Wrap(err, "Failed to create bar graph png") return errors.Wrap(err, "Failed to create bar graph png")
} }
log.Printf("Generated graph png to %s/mac.png", FOLDER) log.Printf("Generated graph png to %s/mac.png", FOLDER)
} else if runtime.GOOS == "linux" { case "linux":
if err := p.Save(13*vg.Inch, 10*vg.Inch, FOLDER+"/linux.png"); err != nil { if err := p.Save(13*vg.Inch, 10*vg.Inch, FOLDER+"/linux.png"); err != nil {
return errors.Wrap(err, "Failed to create bar graph png") return errors.Wrap(err, "Failed to create bar graph png")
} }
log.Printf("Generated graph png to %s/linux.png", FOLDER) log.Printf("Generated graph png to %s/linux.png", FOLDER)
} }
return nil return nil
} }


@ -113,7 +113,7 @@ func updateHashFile(version, arch, filePath string) error {
return fmt.Errorf("failed to open hash file: %v", err) return fmt.Errorf("failed to open hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %x buildkit-%s.linux-%s.tar.gz\n", sum, version, arch)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %x buildkit-%s.linux-%s.tar.gz\n", sum, version, arch); err != nil {
return fmt.Errorf("failed to write to hash file: %v", err) return fmt.Errorf("failed to write to hash file: %v", err)
} }
return nil return nil


@ -117,7 +117,7 @@ func updateHashFile(version, arch, packagePath string) error {
return fmt.Errorf("failed to open hash file: %v", err) return fmt.Errorf("failed to open hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %x cni-plugins-linux-%s-%s.tgz\n", sum, arch, version)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %x cni-plugins-linux-%s-%s.tgz\n", sum, arch, version); err != nil {
return fmt.Errorf("failed to write to hash file: %v", err) return fmt.Errorf("failed to write to hash file: %v", err)
} }
return nil return nil


@ -107,7 +107,7 @@ func updateHashFile(version, arch, folderSuffix string, shaSum [sha256.Size]byte
return fmt.Errorf("failed to open hash file: %v", err) return fmt.Errorf("failed to open hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", shaSum, version)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", shaSum, version); err != nil {
return fmt.Errorf("failed to write to hash file: %v", err) return fmt.Errorf("failed to write to hash file: %v", err)
} }
return nil return nil


@ -107,7 +107,7 @@ func updateHashFile(version string) error {
return fmt.Errorf("failed to open hash file: %v", err) return fmt.Errorf("failed to open hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", sum, version)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", sum, version); err != nil {
return fmt.Errorf("failed to write to hash file: %v", err) return fmt.Errorf("failed to write to hash file: %v", err)
} }
return nil return nil


@ -135,7 +135,7 @@ func updateHashFile(filePath, commit string, shaSum [sha256.Size]byte) error {
return fmt.Errorf("failed to open hash file: %v", err) return fmt.Errorf("failed to open hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", shaSum, commit)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", shaSum, commit); err != nil {
return fmt.Errorf("failed to write to hash file: %v", err) return fmt.Errorf("failed to write to hash file: %v", err)
} }
return nil return nil


@ -111,7 +111,7 @@ func updateHashFile(version, arch, packagePath string) error {
return fmt.Errorf("failed to open hash file: %v", err) return fmt.Errorf("failed to open hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %x crictl-%s-linux-%s.tar.gz\n", sum, version, arch)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %x crictl-%s-linux-%s.tar.gz\n", sum, version, arch); err != nil {
return fmt.Errorf("failed to write to hash file: %v", err) return fmt.Errorf("failed to write to hash file: %v", err)
} }
return nil return nil


@ -92,7 +92,7 @@ func updateHashFiles(version string) error {
return fmt.Errorf("failed to open hash file: %v", err) return fmt.Errorf("failed to open hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %x crun-%s.tar.gz\n", sum, version)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %x crun-%s.tar.gz\n", sum, version); err != nil {
return fmt.Errorf("failed to write to hash file: %v", err) return fmt.Errorf("failed to write to hash file: %v", err)
} }
return nil return nil


@ -105,7 +105,7 @@ func updateHashFile(version, arch, folderSuffix string, shaSum [sha256.Size]byte
return fmt.Errorf("failed to open hash file: %v", err) return fmt.Errorf("failed to open hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", shaSum, version)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", shaSum, version); err != nil {
return fmt.Errorf("failed to write to hash file: %v", err) return fmt.Errorf("failed to write to hash file: %v", err)
} }
return nil return nil


@ -95,7 +95,7 @@ func updateHashFile(version, arch, folderSuffix string) error {
return fmt.Errorf("failed to open hash file: %v", err) return fmt.Errorf("failed to open hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %x docker-%s.tgz\n", sum, version)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %x docker-%s.tgz\n", sum, version); err != nil {
return fmt.Errorf("failed to write to hash file: %v", err) return fmt.Errorf("failed to write to hash file: %v", err)
} }
return nil return nil


@ -154,7 +154,7 @@ func updateGoHashFile(version string) error {
return fmt.Errorf("failed to open go.hash file: %v", err) return fmt.Errorf("failed to open go.hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %s go%s.src.tar.gz\n", sha, version)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %s go%s.src.tar.gz\n", sha, version); err != nil {
return fmt.Errorf("failed to write to go.hash file: %v", err) return fmt.Errorf("failed to write to go.hash file: %v", err)
} }
return nil return nil


@ -105,7 +105,7 @@ func updateHashFile(version, arch, packagePath string) error {
return fmt.Errorf("failed to open hash file: %v", err) return fmt.Errorf("failed to open hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %x nerdctl-%s-linux-%s.tar.gz\n", sum, version, arch)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %x nerdctl-%s-linux-%s.tar.gz\n", sum, version, arch); err != nil {
return fmt.Errorf("failed to write to hash file: %v", err) return fmt.Errorf("failed to write to hash file: %v", err)
} }
return nil return nil


@ -91,7 +91,7 @@ func updateHashFiles(version string) error {
return fmt.Errorf("failed to open hash file: %v", err) return fmt.Errorf("failed to open hash file: %v", err)
} }
defer f.Close() defer f.Close()
if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", sum, version)); err != nil { if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", sum, version); err != nil {
return fmt.Errorf("failed to write to hash file: %v", err) return fmt.Errorf("failed to write to hash file: %v", err)
} }
return nil return nil


@ -37,9 +37,10 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st
} }
class := defaultStorageClassProvisioner class := defaultStorageClassProvisioner
if name == "storage-provisioner-gluster" { switch name {
case "storage-provisioner-gluster":
class = "glusterfile" class = "glusterfile"
} else if name == "storage-provisioner-rancher" { case "storage-provisioner-rancher":
class = "local-path" class = "local-path"
} }


@ -456,20 +456,20 @@ func (d *Driver) Stop() error {
} }
} }
runtime, err := cruntime.New(cruntime.Config{Type: d.NodeConfig.ContainerRuntime, Runner: d.exec}) crMgr, err := cruntime.New(cruntime.Config{Type: d.NodeConfig.ContainerRuntime, Runner: d.exec})
if err != nil { // won't return error because: if err != nil { // won't return error because:
// even though we can't stop the cotainers inside, we still wanna stop the minikube container itself // even though we can't stop the cotainers inside, we still wanna stop the minikube container itself
klog.Errorf("unable to get container runtime: %v", err) klog.Errorf("unable to get container runtime: %v", err)
} else { } else {
containers, err := runtime.ListContainers(cruntime.ListContainersOptions{Namespaces: constants.DefaultNamespaces}) containers, err := crMgr.ListContainers(cruntime.ListContainersOptions{Namespaces: constants.DefaultNamespaces})
if err != nil { if err != nil {
klog.Infof("unable list containers : %v", err) klog.Infof("unable list containers : %v", err)
} }
if len(containers) > 0 { if len(containers) > 0 {
if err := runtime.StopContainers(containers); err != nil { if err := crMgr.StopContainers(containers); err != nil {
klog.Infof("unable to stop containers : %v", err) klog.Infof("unable to stop containers : %v", err)
} }
if err := runtime.KillContainers(containers); err != nil { if err := crMgr.KillContainers(containers); err != nil {
klog.Errorf("unable to kill containers : %v", err) klog.Errorf("unable to kill containers : %v", err)
} }
} }


@ -109,7 +109,7 @@ func CreateNetwork(ociBin, networkName, subnet, staticIP string) (net.IP, error)
return info.gateway, nil return info.gateway, nil
} }
// don't retry if error is not address is taken // don't retry if error is not address is taken
if !(errors.Is(err, ErrNetworkSubnetTaken) || errors.Is(err, ErrNetworkGatewayTaken)) { if !errors.Is(err, ErrNetworkSubnetTaken) && !errors.Is(err, ErrNetworkGatewayTaken) {
klog.Errorf("error while trying to create %s network %s %s: %v", ociBin, networkName, subnet.CIDR, err) klog.Errorf("error while trying to create %s network %s %s: %v", ociBin, networkName, subnet.CIDR, err)
return nil, fmt.Errorf("un-retryable: %w", err) return nil, fmt.Errorf("un-retryable: %w", err)
} }


@ -191,9 +191,10 @@ func CreateContainerNode(p CreateParams) error { //nolint to suppress cyclomatic
runArgs = append(runArgs, "--ip", p.IP) runArgs = append(runArgs, "--ip", p.IP)
} }
if p.GPUs == "all" || p.GPUs == "nvidia" { switch p.GPUs {
case "all", "nvidia":
runArgs = append(runArgs, "--gpus", "all", "--env", "NVIDIA_DRIVER_CAPABILITIES=all") runArgs = append(runArgs, "--gpus", "all", "--env", "NVIDIA_DRIVER_CAPABILITIES=all")
} else if p.GPUs == "amd" { case "amd":
/* https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html /* https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html
* "--security-opt seccomp=unconfined" is also required but included above. * "--security-opt seccomp=unconfined" is also required but included above.
*/ */


@ -225,15 +225,15 @@ func (d *Driver) createNetwork() error {
log.Debugf("created network xml: %s", networkXML.String()) log.Debugf("created network xml: %s", networkXML.String())
// define the network using our template // define the network using our template
var network *libvirt.Network var libvirtNet *libvirt.Network
network, err = conn.NetworkDefineXML(networkXML.String()) libvirtNet, err = conn.NetworkDefineXML(networkXML.String())
if err != nil { if err != nil {
return fmt.Errorf("defining private KVM network %s %s from xml %s: %w", d.PrivateNetwork, subnet.CIDR, networkXML.String(), err) return fmt.Errorf("defining private KVM network %s %s from xml %s: %w", d.PrivateNetwork, subnet.CIDR, networkXML.String(), err)
} }
// and finally create & start it // and finally create & start it
log.Debugf("trying to create private KVM network %s %s...", d.PrivateNetwork, subnet.CIDR) log.Debugf("trying to create private KVM network %s %s...", d.PrivateNetwork, subnet.CIDR)
if err = network.Create(); err == nil { if err = libvirtNet.Create(); err == nil {
log.Debugf("private KVM network %s %s created", d.PrivateNetwork, subnet.CIDR) log.Debugf("private KVM network %s %s created", d.PrivateNetwork, subnet.CIDR)
return nil return nil
} }
@ -263,7 +263,7 @@ func (d *Driver) deleteNetwork() error {
// network: private // network: private
log.Debugf("Checking if network %s exists...", d.PrivateNetwork) log.Debugf("Checking if network %s exists...", d.PrivateNetwork)
network, err := conn.LookupNetworkByName(d.PrivateNetwork) libvirtNet, err := conn.LookupNetworkByName(d.PrivateNetwork)
if err != nil { if err != nil {
if lvErr(err).Code == libvirt.ERR_NO_NETWORK { if lvErr(err).Code == libvirt.ERR_NO_NETWORK {
log.Warnf("Network %s does not exist. Skipping deletion", d.PrivateNetwork) log.Warnf("Network %s does not exist. Skipping deletion", d.PrivateNetwork)
@ -271,7 +271,7 @@ func (d *Driver) deleteNetwork() error {
} }
return errors.Wrapf(err, "failed looking up network %s", d.PrivateNetwork) return errors.Wrapf(err, "failed looking up network %s", d.PrivateNetwork)
} }
defer func() { _ = network.Free() }() defer func() { _ = libvirtNet.Free() }()
log.Debugf("Network %s exists", d.PrivateNetwork) log.Debugf("Network %s exists", d.PrivateNetwork)
err = d.checkDomains(conn) err = d.checkDomains(conn)
@ -283,18 +283,18 @@ func (d *Driver) deleteNetwork() error {
log.Debugf("Trying to delete network %s...", d.PrivateNetwork) log.Debugf("Trying to delete network %s...", d.PrivateNetwork)
deleteFunc := func() error { deleteFunc := func() error {
active, err := network.IsActive() active, err := libvirtNet.IsActive()
if err != nil { if err != nil {
return err return err
} }
if active { if active {
log.Debugf("Destroying active network %s", d.PrivateNetwork) log.Debugf("Destroying active network %s", d.PrivateNetwork)
if err := network.Destroy(); err != nil { if err := libvirtNet.Destroy(); err != nil {
return err return err
} }
} }
log.Debugf("Undefining inactive network %s", d.PrivateNetwork) log.Debugf("Undefining inactive network %s", d.PrivateNetwork)
return network.Undefine() return libvirtNet.Undefine()
} }
if err := retry.Local(deleteFunc, 10*time.Second); err != nil { if err := retry.Local(deleteFunc, 10*time.Second); err != nil {
return errors.Wrap(err, "deleting network") return errors.Wrap(err, "deleting network")
@ -391,23 +391,23 @@ func (d *Driver) checkDomains(conn *libvirt.Connect) error {
// addStaticIP appends new host's name, MAC and static IP address record to list of network DHCP leases. // addStaticIP appends new host's name, MAC and static IP address record to list of network DHCP leases.
// It will return nil if host record already exists. // It will return nil if host record already exists.
func addStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error { func addStaticIP(conn *libvirt.Connect, networkName, hostname, mac, ip string) error {
l, err := dhcpLease(conn, network, hostname, mac, ip) l, err := dhcpLease(conn, networkName, hostname, mac, ip)
if err != nil { if err != nil {
return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", network, hostname, mac, ip, err) return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", networkName, hostname, mac, ip, err)
} }
if l != nil { if l != nil {
log.Debugf("skip adding static IP to network %s - found existing host DHCP lease matching {name: %q, mac: %q, ip: %q}", network, hostname, mac, ip) log.Debugf("skip adding static IP to network %s - found existing host DHCP lease matching {name: %q, mac: %q, ip: %q}", networkName, hostname, mac, ip)
return nil return nil
} }
net, err := conn.LookupNetworkByName(network) libvirtNet, err := conn.LookupNetworkByName(networkName)
if err != nil { if err != nil {
return fmt.Errorf("failed looking up network %s: %w", network, err) return fmt.Errorf("failed looking up network %s: %w", networkName, err)
} }
defer func() { _ = net.Free() }() defer func() { _ = libvirtNet.Free() }()
return net.Update( return libvirtNet.Update(
libvirt.NETWORK_UPDATE_COMMAND_ADD_LAST, libvirt.NETWORK_UPDATE_COMMAND_ADD_LAST,
libvirt.NETWORK_SECTION_IP_DHCP_HOST, libvirt.NETWORK_SECTION_IP_DHCP_HOST,
-1, -1,
@ -417,23 +417,23 @@ func addStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error
// delStaticIP deletes static IP address record that matches given combination of host's name, MAC and IP from list of network DHCP leases. // delStaticIP deletes static IP address record that matches given combination of host's name, MAC and IP from list of network DHCP leases.
// It will return nil if record doesn't exist. // It will return nil if record doesn't exist.
func delStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error { func delStaticIP(conn *libvirt.Connect, networkName, hostname, mac, ip string) error {
l, err := dhcpLease(conn, network, hostname, mac, ip) l, err := dhcpLease(conn, networkName, hostname, mac, ip)
if err != nil { if err != nil {
return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", network, hostname, mac, ip, err) return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", networkName, hostname, mac, ip, err)
} }
if l == nil { if l == nil {
log.Debugf("skip deleting static IP from network %s - couldn't find host DHCP lease matching {name: %q, mac: %q, ip: %q}", network, hostname, mac, ip) log.Debugf("skip deleting static IP from network %s - couldn't find host DHCP lease matching {name: %q, mac: %q, ip: %q}", networkName, hostname, mac, ip)
return nil return nil
} }
net, err := conn.LookupNetworkByName(network) libvirtNet, err := conn.LookupNetworkByName(networkName)
if err != nil { if err != nil {
return fmt.Errorf("failed looking up network %s: %w", network, err) return fmt.Errorf("failed looking up network %s: %w", networkName, err)
} }
defer func() { _ = net.Free() }() defer func() { _ = libvirtNet.Free() }()
return net.Update( return libvirtNet.Update(
libvirt.NETWORK_UPDATE_COMMAND_DELETE, libvirt.NETWORK_UPDATE_COMMAND_DELETE,
libvirt.NETWORK_SECTION_IP_DHCP_HOST, libvirt.NETWORK_SECTION_IP_DHCP_HOST,
-1, -1,
@ -442,56 +442,56 @@ func delStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error
} }
// dhcpLease returns network DHCP lease that matches given combination of host's name, MAC and IP. // dhcpLease returns network DHCP lease that matches given combination of host's name, MAC and IP.
func dhcpLease(conn *libvirt.Connect, network, hostname, mac, ip string) (lease *libvirt.NetworkDHCPLease, err error) { func dhcpLease(conn *libvirt.Connect, networkName, hostname, mac, ip string) (lease *libvirt.NetworkDHCPLease, err error) {
if hostname == "" && mac == "" && ip == "" { if hostname == "" && mac == "" && ip == "" {
return nil, nil return nil, nil
} }
net, err := conn.LookupNetworkByName(network) libvirtNet, err := conn.LookupNetworkByName(networkName)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed looking up network %s: %w", network, err) return nil, fmt.Errorf("failed looking up network %s: %w", networkName, err)
} }
defer func() { _ = net.Free() }() defer func() { _ = libvirtNet.Free() }()
leases, err := net.GetDHCPLeases() leases, err := libvirtNet.GetDHCPLeases()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed getting host DHCP leases: %w", err) return nil, fmt.Errorf("failed getting host DHCP leases: %w", err)
} }
for _, l := range leases { for _, l := range leases {
if (hostname == "" || hostname == l.Hostname) && (mac == "" || mac == l.Mac) && (ip == "" || ip == l.IPaddr) { if (hostname == "" || hostname == l.Hostname) && (mac == "" || mac == l.Mac) && (ip == "" || ip == l.IPaddr) {
log.Debugf("found host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s: %+v", hostname, mac, ip, network, l) log.Debugf("found host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s: %+v", hostname, mac, ip, networkName, l)
return &l, nil return &l, nil
} }
} }
log.Debugf("unable to find host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s", hostname, mac, ip, network) log.Debugf("unable to find host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s", hostname, mac, ip, networkName)
return nil, nil return nil, nil
} }
// ipFromAPI returns current primary IP address of domain interface in network. // ipFromAPI returns current primary IP address of domain interface in network.
func ipFromAPI(conn *libvirt.Connect, domain, network string) (string, error) { func ipFromAPI(conn *libvirt.Connect, domain, networkName string) (string, error) {
mac, err := macFromXML(conn, domain, network) mac, err := macFromXML(conn, domain, networkName)
if err != nil { if err != nil {
return "", fmt.Errorf("failed getting MAC address: %w", err) return "", fmt.Errorf("failed getting MAC address: %w", err)
} }
ifaces, err := ifListFromAPI(conn, domain) ifaces, err := ifListFromAPI(conn, domain)
if err != nil { if err != nil {
return "", fmt.Errorf("failed getting network %s interfaces using API of domain %s: %w", network, domain, err) return "", fmt.Errorf("failed getting network %s interfaces using API of domain %s: %w", networkName, domain, err)
} }
for _, i := range ifaces { for _, i := range ifaces {
if i.Hwaddr == mac { if i.Hwaddr == mac {
if i.Addrs != nil { if i.Addrs != nil {
log.Debugf("domain %s has current primary IP address %s and MAC address %s in network %s", domain, i.Addrs[0].Addr, mac, network) log.Debugf("domain %s has current primary IP address %s and MAC address %s in network %s", domain, i.Addrs[0].Addr, mac, networkName)
return i.Addrs[0].Addr, nil return i.Addrs[0].Addr, nil
} }
log.Debugf("domain %s with MAC address %s doesn't have current IP address in network %s: %+v", domain, mac, network, i) log.Debugf("domain %s with MAC address %s doesn't have current IP address in network %s: %+v", domain, mac, networkName, i)
return "", nil return "", nil
} }
} }
log.Debugf("unable to find current IP address of domain %s in network %s", domain, network) log.Debugf("unable to find current IP address of domain %s in network %s", domain, networkName)
return "", nil return "", nil
} }
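The renames in this file (net → libvirtNet, the network parameter → networkName) line up with the importShadow fix: a local called net hides the standard library net package for the rest of the scope, and a string parameter called network is easy to confuse with the *libvirt.Network handle. A minimal sketch of the shadowing problem; lookupNetwork is a hypothetical stand-in for conn.LookupNetworkByName:

package main

import (
	"fmt"
	"net"
)

// lookupNetwork is a hypothetical stand-in for conn.LookupNetworkByName.
func lookupNetwork(networkName string) string { return "handle:" + networkName }

func main() {
	// Named libvirtNet on purpose: if this variable were called net, it
	// would shadow the imported net package and net.ParseIP below would
	// no longer compile in this scope.
	libvirtNet := lookupNetwork("mk-minikube")
	gateway := net.ParseIP("192.168.39.1")
	fmt.Println(libvirtNet, gateway)
}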
@ -522,40 +522,40 @@ func ifListFromAPI(conn *libvirt.Connect, domain string) ([]libvirt.DomainInterf
} }
// ipFromXML returns defined IP address of interface in network. // ipFromXML returns defined IP address of interface in network.
func ipFromXML(conn *libvirt.Connect, domain, network string) (string, error) { func ipFromXML(conn *libvirt.Connect, domain, networkName string) (string, error) {
mac, err := macFromXML(conn, domain, network) mac, err := macFromXML(conn, domain, networkName)
if err != nil { if err != nil {
return "", fmt.Errorf("failed getting MAC address: %w", err) return "", fmt.Errorf("failed getting MAC address: %w", err)
} }
lease, err := dhcpLease(conn, network, "", mac, "") lease, err := dhcpLease(conn, networkName, "", mac, "")
if err != nil { if err != nil {
return "", fmt.Errorf("failed looking up network %s for host DHCP lease {name: <any>, mac: %q, ip: <any>}: %w", network, mac, err) return "", fmt.Errorf("failed looking up network %s for host DHCP lease {name: <any>, mac: %q, ip: <any>}: %w", networkName, mac, err)
} }
if lease == nil { if lease == nil {
log.Debugf("unable to find defined IP address of network %s interface with MAC address %s", network, mac) log.Debugf("unable to find defined IP address of network %s interface with MAC address %s", networkName, mac)
return "", nil return "", nil
} }
log.Debugf("domain %s has defined IP address %s and MAC address %s in network %s", domain, lease.IPaddr, mac, network) log.Debugf("domain %s has defined IP address %s and MAC address %s in network %s", domain, lease.IPaddr, mac, networkName)
return lease.IPaddr, nil return lease.IPaddr, nil
} }
// macFromXML returns defined MAC address of interface in network from domain XML. // macFromXML returns defined MAC address of interface in network from domain XML.
func macFromXML(conn *libvirt.Connect, domain, network string) (string, error) { func macFromXML(conn *libvirt.Connect, domain, networkName string) (string, error) {
domIfs, err := ifListFromXML(conn, domain) domIfs, err := ifListFromXML(conn, domain)
if err != nil { if err != nil {
return "", fmt.Errorf("failed getting network %s interfaces using XML of domain %s: %w", network, domain, err) return "", fmt.Errorf("failed getting network %s interfaces using XML of domain %s: %w", networkName, domain, err)
} }
for _, i := range domIfs { for _, i := range domIfs {
if i.Source.Network == network { if i.Source.Network == networkName {
log.Debugf("domain %s has defined MAC address %s in network %s", domain, i.Mac.Address, network) log.Debugf("domain %s has defined MAC address %s in network %s", domain, i.Mac.Address, networkName)
return i.Mac.Address, nil return i.Mac.Address, nil
} }
} }
return "", fmt.Errorf("unable to get defined MAC address of network %s interface using XML of domain %s: network %s not found", network, domain, network) return "", fmt.Errorf("unable to get defined MAC address of network %s interface using XML of domain %s: network %s not found", networkName, domain, networkName)
} }
// ifListFromXML returns defined domain interfaces from domain XML. // ifListFromXML returns defined domain interfaces from domain XML.


@ -97,17 +97,17 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer) error {
buf.WriteString(long + "\n\n") buf.WriteString(long + "\n\n")
if cmd.Runnable() { if cmd.Runnable() {
buf.WriteString(fmt.Sprintf("```shell\n%s\n```\n\n", cmd.UseLine())) fmt.Fprintf(buf, "```shell\n%s\n```\n\n", cmd.UseLine())
} }
if len(cmd.Aliases) > 0 { if len(cmd.Aliases) > 0 {
buf.WriteString("### Aliases\n\n") buf.WriteString("### Aliases\n\n")
buf.WriteString(fmt.Sprintf("%s\n\n", cmd.Aliases)) fmt.Fprintf(buf, "%s\n\n", cmd.Aliases)
} }
if len(cmd.Example) > 0 { if len(cmd.Example) > 0 {
buf.WriteString("### Examples\n\n") buf.WriteString("### Examples\n\n")
buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) fmt.Fprintf(buf, "```\n%s\n```\n\n", cmd.Example)
} }
if err := printOptions(buf, cmd); err != nil { if err := printOptions(buf, cmd); err != nil {
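These are QF1012 fixes: buf already satisfies io.Writer (it appears to be a bytes.Buffer in this generator), so fmt.Fprintf can format straight into it instead of building an intermediate string with fmt.Sprintf and copying it via WriteString. A small sketch of the before/after, with a made-up useLine value:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	buf := new(bytes.Buffer)
	useLine := "minikube start [flags]" // hypothetical cmd.UseLine() output

	// Before: buf.WriteString(fmt.Sprintf("```shell\n%s\n```\n\n", useLine))
	// After: format directly into the writer, skipping the temporary string.
	fmt.Fprintf(buf, "```shell\n%s\n```\n\n", useLine)

	fmt.Print(buf.String())
}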


@ -83,7 +83,7 @@ func ErrorCodes(docPath string, pathsToCheck []string) error {
// This is the numeric code of the error, e.g. 80 for ExGuest Error // This is the numeric code of the error, e.g. 80 for ExGuest Error
code := s.Value code := s.Value
buf.WriteString(fmt.Sprintf("%s: %s \n", code, currentError)) fmt.Fprintf(buf, "%s: %s \n", code, currentError)
} }
return true return true
}) })
@ -100,7 +100,7 @@ func ErrorCodes(docPath string, pathsToCheck []string) error {
currentNode = id.Name currentNode = id.Name
if strings.HasPrefix(currentNode, "Ex") && currentNode != "ExitCode" { if strings.HasPrefix(currentNode, "Ex") && currentNode != "ExitCode" {
// We have all the info we're going to get on this error, print it out // We have all the info we're going to get on this error, print it out
buf.WriteString(fmt.Sprintf("%s (Exit code %v) \n", currentID, currentNode)) fmt.Fprintf(buf, "%s (Exit code %v) \n", currentID, currentNode)
if currentComment != "" { if currentComment != "" {
buf.WriteString(currentComment + " \n") buf.WriteString(currentComment + " \n")
} }


@ -48,21 +48,21 @@ var (
) )
// ClientConfig returns the client configuration for a kubectl context // ClientConfig returns the client configuration for a kubectl context
func ClientConfig(context string) (*rest.Config, error) { func ClientConfig(ctx string) (*rest.Config, error) {
loader := clientcmd.NewDefaultClientConfigLoadingRules() loader := clientcmd.NewDefaultClientConfigLoadingRules()
cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, &clientcmd.ConfigOverrides{CurrentContext: context}) cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, &clientcmd.ConfigOverrides{CurrentContext: ctx})
c, err := cc.ClientConfig() c, err := cc.ClientConfig()
if err != nil { if err != nil {
return nil, fmt.Errorf("client config: %v", err) return nil, fmt.Errorf("client config: %v", err)
} }
c = proxy.UpdateTransport(c) c = proxy.UpdateTransport(c)
klog.V(1).Infof("client config for %s: %+v", context, c) klog.V(1).Infof("client config for %s: %+v", ctx, c)
return c, nil return c, nil
} }
// Client gets the Kubernetes client for a kubectl context name // Client gets the Kubernetes client for a kubectl context name
func Client(context string) (*kubernetes.Clientset, error) { func Client(ctx string) (*kubernetes.Clientset, error) {
c, err := ClientConfig(context) c, err := ClientConfig(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
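Renaming the parameter from context to ctx removes a shadow of the standard context package (and matches common Go naming); with the old name, nothing in these functions could reach that package. A hedged sketch of the point, with the kubeconfig loading replaced by a stub:

package main

import (
	"context"
	"fmt"
)

// clientConfig is a simplified stand-in for ClientConfig above.
func clientConfig(ctx string) error {
	// Because the parameter is ctx, not context, the context package is
	// still reachable here, e.g. to derive a cancellable context for later calls.
	c, cancel := context.WithCancel(context.Background())
	defer cancel()
	_ = c
	fmt.Printf("building client config for kubectl context %q\n", ctx)
	return nil
}

func main() {
	_ = clientConfig("minikube")
}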


@ -579,8 +579,8 @@ func installCertSymlinks(cr command.Runner, caCerts map[string]string) error {
// canRead returns true if the file represented // canRead returns true if the file represented
// by path exists and is readable, otherwise false. // by path exists and is readable, otherwise false.
func canRead(path string) bool { func canRead(filePath string) bool {
f, err := os.Open(path) f, err := os.Open(filePath)
if err != nil { if err != nil {
return false return false
} }


@ -164,9 +164,11 @@ func auxiliary(mirror string) []string {
func storageProvisioner(mirror string) string { func storageProvisioner(mirror string) string {
cv := version.GetStorageProvisionerVersion() cv := version.GetStorageProvisionerVersion()
in := "k8s-minikube/storage-provisioner:" + cv in := "k8s-minikube/storage-provisioner:" + cv
if mirror == "" {
switch mirror {
case "":
mirror = "gcr.io" mirror = "gcr.io"
} else if mirror == constants.AliyunMirror { case constants.AliyunMirror:
in = "storage-provisioner:" + cv in = "storage-provisioner:" + cv
} }
return path.Join(mirror, in) return path.Join(mirror, in)


@ -172,7 +172,7 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) {
// init initialises primary control-plane using kubeadm. // init initialises primary control-plane using kubeadm.
func (k *Bootstrapper) init(cfg config.ClusterConfig) error { func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) ver, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
if err != nil { if err != nil {
return errors.Wrap(err, "parsing Kubernetes version") return errors.Wrap(err, "parsing Kubernetes version")
} }
@ -195,7 +195,7 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
"Swap", // For "none" users who have swap configured "Swap", // For "none" users who have swap configured
"NumCPU", // For "none" users who have too few CPUs "NumCPU", // For "none" users who have too few CPUs
} }
if version.GE(semver.MustParse("1.20.0")) { if ver.GE(semver.MustParse("1.20.0")) {
ignore = append(ignore, ignore = append(ignore,
"Mem", // For "none" users who have too little memory "Mem", // For "none" users who have too little memory
) )
@ -719,7 +719,7 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro
// and by that time we would exit completely, so we wait until kubelet begins restarting pods // and by that time we would exit completely, so we wait until kubelet begins restarting pods
klog.Info("waiting for restarted kubelet to initialise ...") klog.Info("waiting for restarted kubelet to initialise ...")
start := time.Now() start := time.Now()
wait := func() error { waitFunc := func() error {
pods, err := client.CoreV1().Pods(meta.NamespaceSystem).List(context.Background(), meta.ListOptions{LabelSelector: "tier=control-plane"}) pods, err := client.CoreV1().Pods(meta.NamespaceSystem).List(context.Background(), meta.ListOptions{LabelSelector: "tier=control-plane"})
if err != nil { if err != nil {
return err return err
@ -731,7 +731,7 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro
} }
return fmt.Errorf("kubelet not initialised") return fmt.Errorf("kubelet not initialised")
} }
_ = retry.Expo(wait, 250*time.Millisecond, 1*time.Minute) _ = retry.Expo(waitFunc, 250*time.Millisecond, 1*time.Minute)
klog.Infof("kubelet initialised") klog.Infof("kubelet initialised")
klog.Infof("duration metric: took %s waiting for restarted kubelet to initialise ...", time.Since(start)) klog.Infof("duration metric: took %s waiting for restarted kubelet to initialise ...", time.Since(start))
} }
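Naming the polling closure waitFunc (rather than wait) avoids a shadow and makes the retried unit explicit: the closure returns an error until control-plane pods reappear, and, judging by the call above, retry.Expo re-runs it starting at 250ms within a one-minute budget. A self-contained sketch of the same shape, using a plain fixed-interval loop instead of minikube's retry helper:

package main

import (
	"fmt"
	"time"
)

func main() {
	attempts := 0

	// waitFunc mirrors the closure handed to retry.Expo above: it returns
	// nil once the condition it polls for is satisfied.
	waitFunc := func() error {
		attempts++
		if attempts < 3 { // stand-in for "kubelet not initialised"
			return fmt.Errorf("not ready yet (attempt %d)", attempts)
		}
		return nil
	}

	// Minimal fixed-interval retry; the real helper adds exponential
	// backoff and an overall deadline.
	for waitFunc() != nil {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("ready after %d attempts\n", attempts)
}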
@ -784,11 +784,11 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) {
joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd))
// avoid "Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock" error // avoid "Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock" error
version, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion) ver, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion)
if err != nil { if err != nil {
return "", errors.Wrap(err, "parsing Kubernetes version") return "", errors.Wrap(err, "parsing Kubernetes version")
} }
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: k.c, Socket: cc.KubernetesConfig.CRISocket, KubernetesVersion: version}) cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: k.c, Socket: cc.KubernetesConfig.CRISocket, KubernetesVersion: ver})
if err != nil { if err != nil {
klog.Errorf("cruntime: %v", err) klog.Errorf("cruntime: %v", err)
} }
@ -840,11 +840,11 @@ func StopKubernetes(runner command.Runner, cr cruntime.Manager) {
// DeleteCluster removes the components that were started earlier // DeleteCluster removes the components that were started earlier
func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion) ver, err := util.ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil { if err != nil {
return errors.Wrap(err, "parsing Kubernetes version") return errors.Wrap(err, "parsing Kubernetes version")
} }
cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket, KubernetesVersion: version}) cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket, KubernetesVersion: ver})
if err != nil { if err != nil {
return errors.Wrap(err, "runtime") return errors.Wrap(err, "runtime")
} }
@ -852,7 +852,7 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
ka := bsutil.InvokeKubeadm(k8s.KubernetesVersion) ka := bsutil.InvokeKubeadm(k8s.KubernetesVersion)
sp := cr.SocketPath() sp := cr.SocketPath()
cmd := fmt.Sprintf("%s reset --cri-socket %s --force", ka, sp) cmd := fmt.Sprintf("%s reset --cri-socket %s --force", ka, sp)
if version.LT(semver.MustParse("1.11.0")) { if ver.LT(semver.MustParse("1.11.0")) {
cmd = fmt.Sprintf("%s reset --cri-socket %s", ka, sp) cmd = fmt.Sprintf("%s reset --cri-socket %s", ka, sp)
} }
@ -874,12 +874,12 @@ func (k *Bootstrapper) SetupCerts(k8s config.ClusterConfig, n config.Node, pcpCm
func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
klog.Infof("updating cluster %+v ...", cfg) klog.Infof("updating cluster %+v ...", cfg)
images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion) imgs, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion)
if err != nil { if err != nil {
return errors.Wrap(err, "kubeadm images") return errors.Wrap(err, "kubeadm images")
} }
version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) ver, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
if err != nil { if err != nil {
return errors.Wrap(err, "parsing Kubernetes version") return errors.Wrap(err, "parsing Kubernetes version")
} }
@ -887,7 +887,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
Type: cfg.KubernetesConfig.ContainerRuntime, Type: cfg.KubernetesConfig.ContainerRuntime,
Runner: k.c, Runner: k.c,
Socket: cfg.KubernetesConfig.CRISocket, Socket: cfg.KubernetesConfig.CRISocket,
KubernetesVersion: version, KubernetesVersion: ver,
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "runtime") return errors.Wrap(err, "runtime")
@ -903,7 +903,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
} }
if cfg.KubernetesConfig.ShouldLoadCachedImages { if cfg.KubernetesConfig.ShouldLoadCachedImages {
if err := machine.LoadCachedImages(&cfg, k.c, images, detect.ImageCacheDir(), false); err != nil { if err := machine.LoadCachedImages(&cfg, k.c, imgs, detect.ImageCacheDir(), false); err != nil {
out.FailureT("Unable to load cached images: {{.error}}", out.V{"error": err}) out.FailureT("Unable to load cached images: {{.error}}", out.V{"error": err})
} }
} }


@ -34,21 +34,21 @@ import (
) )
// HostIP gets the ip address to be used for mapping host -> VM and VM -> host // HostIP gets the ip address to be used for mapping host -> VM and VM -> host
func HostIP(host *host.Host, clusterName string) (net.IP, error) { func HostIP(hostInfo *host.Host, clusterName string) (net.IP, error) {
switch host.DriverName { switch hostInfo.DriverName {
case driver.Docker: case driver.Docker:
return oci.RoutableHostIPFromInside(oci.Docker, clusterName, host.Name) return oci.RoutableHostIPFromInside(oci.Docker, clusterName, hostInfo.Name)
case driver.Podman: case driver.Podman:
return oci.RoutableHostIPFromInside(oci.Podman, clusterName, host.Name) return oci.RoutableHostIPFromInside(oci.Podman, clusterName, hostInfo.Name)
case driver.SSH: case driver.SSH:
ip, err := host.Driver.GetIP() ip, err := hostInfo.Driver.GetIP()
if err != nil { if err != nil {
return []byte{}, errors.Wrap(err, "Error getting VM/Host IP address") return []byte{}, errors.Wrap(err, "Error getting VM/Host IP address")
} }
return net.ParseIP(ip), nil return net.ParseIP(ip), nil
case driver.KVM2: case driver.KVM2:
// `host.Driver.GetIP` returns dhcp lease info for a given network(=`virsh net-dhcp-leases minikube-net`) // `host.Driver.GetIP` returns dhcp lease info for a given network(=`virsh net-dhcp-leases minikube-net`)
vmIPString, err := host.Driver.GetIP() vmIPString, err := hostInfo.Driver.GetIP()
if err != nil { if err != nil {
return []byte{}, errors.Wrap(err, "Error getting VM/Host IP address") return []byte{}, errors.Wrap(err, "Error getting VM/Host IP address")
} }
@ -59,7 +59,7 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) {
} }
return net.IPv4(vmIP[0], vmIP[1], vmIP[2], byte(1)), nil return net.IPv4(vmIP[0], vmIP[1], vmIP[2], byte(1)), nil
case driver.QEMU, driver.QEMU2: case driver.QEMU, driver.QEMU2:
ipString, err := host.Driver.GetIP() ipString, err := hostInfo.Driver.GetIP()
if err != nil { if err != nil {
return []byte{}, errors.Wrap(err, "Error getting IP address") return []byte{}, errors.Wrap(err, "Error getting IP address")
} }
@ -70,7 +70,7 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) {
// socket_vmnet network case // socket_vmnet network case
return net.ParseIP("192.168.105.1"), nil return net.ParseIP("192.168.105.1"), nil
case driver.HyperV: case driver.HyperV:
v := reflect.ValueOf(host.Driver).Elem() v := reflect.ValueOf(hostInfo.Driver).Elem()
var hypervVirtualSwitch string var hypervVirtualSwitch string
// We don't have direct access to hyperv.Driver so use reflection to retrieve the virtual switch name // We don't have direct access to hyperv.Driver so use reflection to retrieve the virtual switch name
for i := 0; i < v.NumField(); i++ { for i := 0; i < v.NumField(); i++ {
@ -91,7 +91,7 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) {
return ip, nil return ip, nil
case driver.VirtualBox: case driver.VirtualBox:
vBoxManageCmd := driver.VBoxManagePath() vBoxManageCmd := driver.VBoxManagePath()
out, err := exec.Command(vBoxManageCmd, "showvminfo", host.Name, "--machinereadable").Output() out, err := exec.Command(vBoxManageCmd, "showvminfo", hostInfo.Name, "--machinereadable").Output()
if err != nil { if err != nil {
return []byte{}, errors.Wrap(err, "vboxmanage") return []byte{}, errors.Wrap(err, "vboxmanage")
} }
@ -126,11 +126,11 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) {
return net.ParseIP(ip), nil return net.ParseIP(ip), nil
case driver.HyperKit: case driver.HyperKit:
vmIPString, _ := host.Driver.GetIP() vmIPString, _ := hostInfo.Driver.GetIP()
gatewayIPString := vmIPString[:strings.LastIndex(vmIPString, ".")+1] + "1" gatewayIPString := vmIPString[:strings.LastIndex(vmIPString, ".")+1] + "1"
return net.ParseIP(gatewayIPString), nil return net.ParseIP(gatewayIPString), nil
case driver.VMware: case driver.VMware:
vmIPString, err := host.Driver.GetIP() vmIPString, err := hostInfo.Driver.GetIP()
if err != nil { if err != nil {
return []byte{}, errors.Wrap(err, "Error getting VM IP address") return []byte{}, errors.Wrap(err, "Error getting VM IP address")
} }
@ -140,28 +140,28 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) {
} }
return net.IPv4(vmIP[0], vmIP[1], vmIP[2], byte(1)), nil return net.IPv4(vmIP[0], vmIP[1], vmIP[2], byte(1)), nil
case driver.VFKit: case driver.VFKit:
vmIPString, _ := host.Driver.GetIP() vmIPString, _ := hostInfo.Driver.GetIP()
gatewayIPString := vmIPString[:strings.LastIndex(vmIPString, ".")+1] + "1" gatewayIPString := vmIPString[:strings.LastIndex(vmIPString, ".")+1] + "1"
return net.ParseIP(gatewayIPString), nil return net.ParseIP(gatewayIPString), nil
case driver.None: case driver.None:
return net.ParseIP("127.0.0.1"), nil return net.ParseIP("127.0.0.1"), nil
default: default:
return []byte{}, fmt.Errorf("HostIP not yet implemented for %q driver", host.DriverName) return []byte{}, fmt.Errorf("HostIP not yet implemented for %q driver", hostInfo.DriverName)
} }
} }
// DriverIP gets the ip address of the current minikube cluster // DriverIP gets the ip address of the current minikube cluster
func DriverIP(api libmachine.API, machineName string) (net.IP, error) { func DriverIP(api libmachine.API, machineName string) (net.IP, error) {
host, err := machine.LoadHost(api, machineName) hostInfo, err := machine.LoadHost(api, machineName)
if err != nil { if err != nil {
return nil, err return nil, err
} }
ipStr, err := host.Driver.GetIP() ipStr, err := hostInfo.Driver.GetIP()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "getting IP") return nil, errors.Wrap(err, "getting IP")
} }
if driver.IsKIC(host.DriverName) { if driver.IsKIC(hostInfo.DriverName) {
ipStr = oci.DefaultBindIPV4 ipStr = oci.DefaultBindIPV4
} }
ip := net.ParseIP(ipStr) ip := net.ParseIP(ipStr)


@ -245,18 +245,18 @@ func ConfigureDefaultBridgeCNIs(r Runner, networkPlugin string) error {
// disableAllBridgeCNIs disables all bridge cnis by changing extension to "mk_disabled" of all *bridge* config file(s) found in default location (ie, /etc/cni/net.d). // disableAllBridgeCNIs disables all bridge cnis by changing extension to "mk_disabled" of all *bridge* config file(s) found in default location (ie, /etc/cni/net.d).
func disableAllBridgeCNIs(r Runner) error { func disableAllBridgeCNIs(r Runner) error {
path := "/etc/cni/net.d" cniPath := "/etc/cni/net.d"
out, err := r.RunCmd(exec.Command( out, err := r.RunCmd(exec.Command(
// for cri-o, we also disable 87-podman.conflist (that does not have 'bridge' in its name) // for cri-o, we also disable 87-podman.conflist (that does not have 'bridge' in its name)
"sudo", "find", path, "-maxdepth", "1", "-type", "f", "(", "(", "-name", "*bridge*", "-or", "-name", "*podman*", ")", "-and", "-not", "-name", "*.mk_disabled", ")", "-printf", "%p, ", "-exec", "sh", "-c", "sudo", "find", cniPath, "-maxdepth", "1", "-type", "f", "(", "(", "-name", "*bridge*", "-or", "-name", "*podman*", ")", "-and", "-not", "-name", "*.mk_disabled", ")", "-printf", "%p, ", "-exec", "sh", "-c",
`sudo mv {} {}.mk_disabled`, ";")) `sudo mv {} {}.mk_disabled`, ";"))
if err != nil { if err != nil {
return fmt.Errorf("failed to disable all bridge cni configs in %q: %v", path, err) return fmt.Errorf("failed to disable all bridge cni configs in %q: %v", cniPath, err)
} }
configs := strings.Trim(out.Stdout.String(), ", ") configs := strings.Trim(out.Stdout.String(), ", ")
if len(configs) == 0 { if len(configs) == 0 {
klog.Infof("no active bridge cni configs found in %q - nothing to disable", path) klog.Infof("no active bridge cni configs found in %q - nothing to disable", cniPath)
return nil return nil
} }
klog.Infof("disabled [%s] bridge cni config(s)", configs) klog.Infof("disabled [%s] bridge cni config(s)", configs)


@ -44,10 +44,10 @@ type kicRunner struct {
} }
// NewKICRunner returns a kicRunner implementor of runner which runs cmds inside a container // NewKICRunner returns a kicRunner implementor of runner which runs cmds inside a container
func NewKICRunner(containerNameOrID string, oci string) Runner { func NewKICRunner(containerNameOrID string, ociName string) Runner {
return &kicRunner{ return &kicRunner{
nameOrID: containerNameOrID, nameOrID: containerNameOrID,
ociBin: oci, // docker or podman ociBin: ociName, // docker or podman
} }
} }
@ -271,8 +271,8 @@ func copyToPodman(src string, dest string) error {
defer file.Close() defer file.Close()
parts := strings.Split(dest, ":") parts := strings.Split(dest, ":")
container := parts[0] container := parts[0]
path := parts[1] containerPath := parts[1]
cmd := exec.Command(oci.Podman, "exec", "-i", container, "tee", path) cmd := exec.Command(oci.Podman, "exec", "-i", container, "tee", containerPath)
cmd.Stdin = file cmd.Stdin = file
klog.Infof("Run: %v", cmd) klog.Infof("Run: %v", cmd)
if err := cmd.Run(); err != nil { if err := cmd.Run(); err != nil {


@ -58,7 +58,7 @@ func ControlPlanes(cc ClusterConfig) []Node {
func IsPrimaryControlPlane(cc ClusterConfig, node Node) bool { func IsPrimaryControlPlane(cc ClusterConfig, node Node) bool {
// TODO (prezha): find where, for "none" driver, we set first (ie, primary control-plane) node name to "m01" - that should not happen but it's happening before pr #17909 // TODO (prezha): find where, for "none" driver, we set first (ie, primary control-plane) node name to "m01" - that should not happen but it's happening before pr #17909
// return node.ControlPlane && node.Name == "" // return node.ControlPlane && node.Name == ""
return cc.Nodes != nil && cc.Nodes[0].Name == node.Name return len(cc.Nodes) > 0 && cc.Nodes[0].Name == node.Name
} }
// IsValid checks if the profile has the essential info needed for a profile // IsValid checks if the profile has the essential info needed for a profile
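This is the weakCond fix: cc.Nodes != nil only says the slice header is non-nil, and a non-nil but empty slice would still make cc.Nodes[0] panic with an index-out-of-range; len(cc.Nodes) > 0 guards the actual access. A minimal sketch (node is a stand-in struct, not the real config type):

package main

import "fmt"

type node struct{ Name string }

func main() {
	nodes := make([]node, 0) // non-nil, but empty

	fmt.Println(nodes != nil)   // true: passes the weak nil check
	fmt.Println(len(nodes) > 0) // false: correctly blocks nodes[0]

	// Safe form of the primary control-plane check above.
	if len(nodes) > 0 && nodes[0].Name == "m01" {
		fmt.Println("primary control plane")
	}
}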


@ -281,9 +281,9 @@ func (r *Containerd) ListImages(ListImagesOptions) ([]ListImage, error) {
} }
// LoadImage loads an image into this runtime // LoadImage loads an image into this runtime
func (r *Containerd) LoadImage(path string) error { func (r *Containerd) LoadImage(imagePath string) error {
klog.Infof("Loading image: %s", path) klog.Infof("Loading image: %s", imagePath)
c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "import", path) c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "import", imagePath)
if _, err := r.Runner.RunCmd(c); err != nil { if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrapf(err, "ctr images import") return errors.Wrapf(err, "ctr images import")
} }
@ -296,9 +296,9 @@ func (r *Containerd) PullImage(name string) error {
} }
// SaveImage save an image from this runtime // SaveImage save an image from this runtime
func (r *Containerd) SaveImage(name string, path string) error { func (r *Containerd) SaveImage(name string, destPath string) error {
klog.Infof("Saving image %s: %s", name, path) klog.Infof("Saving image %s: %s", name, destPath)
c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "export", path, name) c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "export", destPath, name)
if _, err := r.Runner.RunCmd(c); err != nil { if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrapf(err, "ctr images export") return errors.Wrapf(err, "ctr images export")
} }
@ -526,11 +526,11 @@ func (r *Containerd) Preload(cc config.ClusterConfig) error {
cRuntime := cc.KubernetesConfig.ContainerRuntime cRuntime := cc.KubernetesConfig.ContainerRuntime
// If images already exist, return // If images already exist, return
images, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion) imgs, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion)
if err != nil { if err != nil {
return errors.Wrap(err, "getting images") return errors.Wrap(err, "getting images")
} }
if containerdImagesPreloaded(r.Runner, images) { if containerdImagesPreloaded(r.Runner, imgs) {
klog.Info("Images already preloaded, skipping extraction") klog.Info("Images already preloaded, skipping extraction")
return nil return nil
} }
@ -583,7 +583,7 @@ func (r *Containerd) Restart() error {
} }
// containerdImagesPreloaded returns true if all images have been preloaded // containerdImagesPreloaded returns true if all images have been preloaded
func containerdImagesPreloaded(runner command.Runner, images []string) bool { func containerdImagesPreloaded(runner command.Runner, imgs []string) bool {
var rr *command.RunResult var rr *command.RunResult
imageList := func() (err error) { imageList := func() (err error) {
@ -604,7 +604,7 @@ func containerdImagesPreloaded(runner command.Runner, images []string) bool {
} }
// Make sure images == imgs // Make sure images == imgs
for _, i := range images { for _, i := range imgs {
found := false found := false
for _, ji := range jsonImages.Images { for _, ji := range jsonImages.Images {
for _, rt := range ji.RepoTags { for _, rt := range ji.RepoTags {
@ -629,6 +629,6 @@ func containerdImagesPreloaded(runner command.Runner, images []string) bool {
} }
// ImagesPreloaded returns true if all images have been preloaded // ImagesPreloaded returns true if all images have been preloaded
func (r *Containerd) ImagesPreloaded(images []string) bool { func (r *Containerd) ImagesPreloaded(imgs []string) bool {
return containerdImagesPreloaded(r.Runner, images) return containerdImagesPreloaded(r.Runner, imgs)
} }


@ -271,9 +271,9 @@ func (r *CRIO) ListImages(ListImagesOptions) ([]ListImage, error) {
} }
// LoadImage loads an image into this runtime // LoadImage loads an image into this runtime
func (r *CRIO) LoadImage(path string) error { func (r *CRIO) LoadImage(imgPath string) error {
klog.Infof("Loading image: %s", path) klog.Infof("Loading image: %s", imgPath)
c := exec.Command("sudo", "podman", "load", "-i", path) c := exec.Command("sudo", "podman", "load", "-i", imgPath)
if _, err := r.Runner.RunCmd(c); err != nil { if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "crio load image") return errors.Wrap(err, "crio load image")
} }
@ -286,9 +286,9 @@ func (r *CRIO) PullImage(name string) error {
} }
// SaveImage saves an image from this runtime // SaveImage saves an image from this runtime
func (r *CRIO) SaveImage(name string, path string) error { func (r *CRIO) SaveImage(name string, destPath string) error {
klog.Infof("Saving image %s: %s", name, path) klog.Infof("Saving image %s: %s", name, destPath)
c := exec.Command("sudo", "podman", "save", name, "-o", path) c := exec.Command("sudo", "podman", "save", name, "-o", destPath)
if _, err := r.Runner.RunCmd(c); err != nil { if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "crio save image") return errors.Wrap(err, "crio save image")
} }
@ -425,11 +425,11 @@ func (r *CRIO) Preload(cc config.ClusterConfig) error {
cRuntime := cc.KubernetesConfig.ContainerRuntime cRuntime := cc.KubernetesConfig.ContainerRuntime
// If images already exist, return // If images already exist, return
images, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion) imgs, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion)
if err != nil { if err != nil {
return errors.Wrap(err, "getting images") return errors.Wrap(err, "getting images")
} }
if crioImagesPreloaded(r.Runner, images) { if crioImagesPreloaded(r.Runner, imgs) {
klog.Info("Images already preloaded, skipping extraction") klog.Info("Images already preloaded, skipping extraction")
return nil return nil
} }
@ -477,7 +477,7 @@ func (r *CRIO) Preload(cc config.ClusterConfig) error {
} }
// crioImagesPreloaded returns true if all images have been preloaded // crioImagesPreloaded returns true if all images have been preloaded
func crioImagesPreloaded(runner command.Runner, images []string) bool { func crioImagesPreloaded(runner command.Runner, imgs []string) bool {
rr, err := runner.RunCmd(exec.Command("sudo", "crictl", "images", "--output", "json")) rr, err := runner.RunCmd(exec.Command("sudo", "crictl", "images", "--output", "json"))
if err != nil { if err != nil {
return false return false
@ -491,7 +491,7 @@ func crioImagesPreloaded(runner command.Runner, images []string) bool {
} }
// Make sure images == imgs // Make sure images == imgs
for _, i := range images { for _, i := range imgs {
found := false found := false
for _, ji := range jsonImages.Images { for _, ji := range jsonImages.Images {
for _, rt := range ji.RepoTags { for _, rt := range ji.RepoTags {
@ -516,6 +516,6 @@ func crioImagesPreloaded(runner command.Runner, images []string) bool {
} }
// ImagesPreloaded returns true if all images have been preloaded // ImagesPreloaded returns true if all images have been preloaded
func (r *CRIO) ImagesPreloaded(images []string) bool { func (r *CRIO) ImagesPreloaded(imgs []string) bool {
return crioImagesPreloaded(r.Runner, images) return crioImagesPreloaded(r.Runner, imgs)
} }


@ -285,9 +285,9 @@ func (r *Docker) ListImages(ListImagesOptions) ([]ListImage, error) {
Tag string `json:"Tag"` Tag string `json:"Tag"`
Size string `json:"Size"` Size string `json:"Size"`
} }
images := strings.Split(rr.Stdout.String(), "\n") imgs := strings.Split(rr.Stdout.String(), "\n")
result := []ListImage{} result := []ListImage{}
for _, img := range images { for _, img := range imgs {
if img == "" { if img == "" {
continue continue
} }
@ -313,9 +313,9 @@ func (r *Docker) ListImages(ListImagesOptions) ([]ListImage, error) {
} }
// LoadImage loads an image into this runtime // LoadImage loads an image into this runtime
func (r *Docker) LoadImage(path string) error { func (r *Docker) LoadImage(imgPath string) error {
klog.Infof("Loading image: %s", path) klog.Infof("Loading image: %s", imgPath)
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cat %s | docker load", path)) c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cat %s | docker load", imgPath))
if _, err := r.Runner.RunCmd(c); err != nil { if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "loadimage docker") return errors.Wrap(err, "loadimage docker")
} }
@ -336,9 +336,9 @@ func (r *Docker) PullImage(name string) error {
} }
// SaveImage saves an image from this runtime // SaveImage saves an image from this runtime
func (r *Docker) SaveImage(name string, path string) error { func (r *Docker) SaveImage(name string, imagePath string) error {
klog.Infof("Saving image %s: %s", name, path) klog.Infof("Saving image %s: %s", name, imagePath)
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("docker save '%s' | sudo tee %s >/dev/null", name, path)) c := exec.Command("/bin/bash", "-c", fmt.Sprintf("docker save '%s' | sudo tee %s >/dev/null", name, imagePath))
if _, err := r.Runner.RunCmd(c); err != nil { if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "saveimage docker") return errors.Wrap(err, "saveimage docker")
} }
@ -594,13 +594,14 @@ func (r *Docker) configureDocker(driver string) error {
StorageDriver: "overlay2", StorageDriver: "overlay2",
} }
if r.GPUs == "all" || r.GPUs == "nvidia" { switch r.GPUs {
case "all", "nvidia":
assets.Addons["nvidia-device-plugin"].EnableByDefault() assets.Addons["nvidia-device-plugin"].EnableByDefault()
daemonConfig.DefaultRuntime = "nvidia" daemonConfig.DefaultRuntime = "nvidia"
runtimes := &dockerDaemonRuntimes{} runtimes := &dockerDaemonRuntimes{}
runtimes.Nvidia.Path = "/usr/bin/nvidia-container-runtime" runtimes.Nvidia.Path = "/usr/bin/nvidia-container-runtime"
daemonConfig.Runtimes = runtimes daemonConfig.Runtimes = runtimes
} else if r.GPUs == "amd" { case "amd":
assets.Addons["amd-gpu-device-plugin"].EnableByDefault() assets.Addons["amd-gpu-device-plugin"].EnableByDefault()
} }
@ -624,11 +625,11 @@ func (r *Docker) Preload(cc config.ClusterConfig) error {
cRuntime := cc.KubernetesConfig.ContainerRuntime cRuntime := cc.KubernetesConfig.ContainerRuntime
// If images already exist, return // If images already exist, return
images, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion) imgs, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion)
if err != nil { if err != nil {
return errors.Wrap(err, "getting images") return errors.Wrap(err, "getting images")
} }
if dockerImagesPreloaded(r.Runner, images) { if dockerImagesPreloaded(r.Runner, imgs) {
klog.Info("Images already preloaded, skipping extraction") klog.Info("Images already preloaded, skipping extraction")
return nil return nil
} }
@ -687,7 +688,7 @@ func (r *Docker) Preload(cc config.ClusterConfig) error {
} }
// dockerImagesPreloaded returns true if all images have been preloaded // dockerImagesPreloaded returns true if all images have been preloaded
func dockerImagesPreloaded(runner command.Runner, images []string) bool { func dockerImagesPreloaded(runner command.Runner, imgs []string) bool {
rr, err := runner.RunCmd(exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}")) rr, err := runner.RunCmd(exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}"))
if err != nil { if err != nil {
klog.Warning(err) klog.Warning(err)
@ -702,7 +703,7 @@ func dockerImagesPreloaded(runner command.Runner, images []string) bool {
klog.Infof("Got preloaded images: %s", rr.Output()) klog.Infof("Got preloaded images: %s", rr.Output())
// Make sure images == imgs // Make sure images == imgs
for _, i := range images { for _, i := range imgs {
i = image.TrimDockerIO(i) i = image.TrimDockerIO(i)
if _, ok := preloadedImages[i]; !ok { if _, ok := preloadedImages[i]; !ok {
klog.Infof("%s wasn't preloaded", i) klog.Infof("%s wasn't preloaded", i)
@ -759,8 +760,8 @@ func dockerBoundToContainerd(runner command.Runner) bool {
} }
// ImagesPreloaded returns true if all images have been preloaded // ImagesPreloaded returns true if all images have been preloaded
func (r *Docker) ImagesPreloaded(images []string) bool { func (r *Docker) ImagesPreloaded(imgs []string) bool {
return dockerImagesPreloaded(r.Runner, images) return dockerImagesPreloaded(r.Runner, imgs)
} }
const ( const (


@ -67,8 +67,8 @@ func LocalISOResource(isoURL string) string {
} }
// fileURI returns a file:// URI for a path // fileURI returns a file:// URI for a path
func fileURI(path string) string { func fileURI(filePath string) string {
return "file://" + filepath.ToSlash(path) return "file://" + filepath.ToSlash(filePath)
} }
// localISOPath returns where an ISO should be stored locally // localISOPath returns where an ISO should be stored locally


@ -250,10 +250,10 @@ func saveChecksumFile(k8sVersion, containerRuntime string, checksum []byte) erro
// verifyChecksum returns true if the checksum of the local binary matches // verifyChecksum returns true if the checksum of the local binary matches
// the checksum of the remote binary // the checksum of the remote binary
func verifyChecksum(k8sVersion, containerRuntime, path string) error { func verifyChecksum(k8sVersion, containerRuntime, binaryPath string) error {
klog.Infof("verifying checksum of %s ...", path) klog.Infof("verifying checksum of %s ...", binaryPath)
// get md5 checksum of tarball path // get md5 checksum of tarball path
contents, err := os.ReadFile(path) contents, err := os.ReadFile(binaryPath)
if err != nil { if err != nil {
return errors.Wrap(err, "reading tarball") return errors.Wrap(err, "reading tarball")
} }
@ -266,7 +266,7 @@ func verifyChecksum(k8sVersion, containerRuntime, path string) error {
// create a slice of checksum, which is [16]byte // create a slice of checksum, which is [16]byte
if string(remoteChecksum) != string(checksum[:]) { if string(remoteChecksum) != string(checksum[:]) {
return fmt.Errorf("checksum of %s does not match remote checksum (%s != %s)", path, string(remoteChecksum), string(checksum[:])) return fmt.Errorf("checksum of %s does not match remote checksum (%s != %s)", binaryPath, string(remoteChecksum), string(checksum[:]))
} }
return nil return nil
} }
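verifyChecksum reads the downloaded binary, takes its MD5 (a [16]byte from md5.Sum, per the comment above), and compares it byte-for-byte with the stored remote checksum. A hedged, in-memory sketch of that comparison; the file contents and remote checksum are fabricated here rather than read from disk or the release bucket:

package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
)

func main() {
	contents := []byte("kubelet binary bytes") // stand-in for os.ReadFile(binaryPath)
	checksum := md5.Sum(contents)              // [16]byte

	remoteChecksum := checksum[:] // stand-in for the checksum fetched for this release

	if !bytes.Equal(remoteChecksum, checksum[:]) {
		fmt.Println("checksum mismatch")
		return
	}
	fmt.Println("checksum verified")
}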


@ -159,7 +159,7 @@ func extractDriverVersion(s string) string {
return strings.TrimPrefix(v, "v") return strings.TrimPrefix(v, "v")
} }
func driverExists(driver string) bool { func driverExists(driverName string) bool {
_, err := exec.LookPath(driver) _, err := exec.LookPath(driverName)
return err == nil return err == nil
} }


@ -38,28 +38,28 @@ func IsBootpdBlocked(cc config.ClusterConfig) bool {
if cc.Driver != driver.QEMU2 || runtime.GOOS != "darwin" || cc.Network != "socket_vmnet" { if cc.Driver != driver.QEMU2 || runtime.GOOS != "darwin" || cc.Network != "socket_vmnet" {
return false return false
} }
out, err := exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getglobalstate").Output() rest, err := exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getglobalstate").Output()
if err != nil { if err != nil {
klog.Warningf("failed to get firewall state: %v", err) klog.Warningf("failed to get firewall state: %v", err)
return false return false
} }
if regexp.MustCompile(`Firewall is disabled`).Match(out) { if regexp.MustCompile(`Firewall is disabled`).Match(rest) {
return false return false
} }
out, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getallowsigned").Output() rest, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getallowsigned").Output()
if err != nil { if err != nil {
// macOS < 15 or other issue: need to use --list. // macOS < 15 or other issue: need to use --list.
klog.Warningf("failed to list firewall allowedsinged option: %v", err) klog.Warningf("failed to list firewall allowedsinged option: %v", err)
// macOS >= 15: bootpd may be allowed as builtin software // macOS >= 15: bootpd may be allowed as builtin software
} else if regexp.MustCompile(`Automatically allow built-in signed software ENABLED`).Match(out) { } else if regexp.MustCompile(`Automatically allow built-in signed software ENABLED`).Match(rest) {
return false return false
} }
out, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--listapps").Output() rest, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--listapps").Output()
if err != nil { if err != nil {
klog.Warningf("failed to list firewall apps: %v", err) klog.Warningf("failed to list firewall apps: %v", err)
return false return false
} }
return !regexp.MustCompile(`\/usr\/libexec\/bootpd.*\n.*\( Allow`).Match(out) return !regexp.MustCompile(`\/usr\/libexec\/bootpd.*\n.*\( Allow`).Match(rest)
} }
// UnblockBootpd adds bootpd to the built-in macOS firewall and then unblocks it // UnblockBootpd adds bootpd to the built-in macOS firewall and then unblocks it


@ -340,6 +340,6 @@ func normalizeTagName(image string) string {
// Remove docker.io prefix since it won't be included in image names // Remove docker.io prefix since it won't be included in image names
// when we call `docker images`. // when we call `docker images`.
func TrimDockerIO(name string) string { func TrimDockerIO(imageName string) string {
return strings.TrimPrefix(name, "docker.io/") return strings.TrimPrefix(imageName, "docker.io/")
} }


@ -184,9 +184,9 @@ func replaceWinDriveLetterToVolumeName(s string) (string, error) {
if err != nil { if err != nil {
return "", err return "", err
} }
path := vname + s[3:] p := vname + s[3:]
return path, nil return p, nil
} }
func getWindowsVolumeNameCmd(d string) (string, error) { func getWindowsVolumeNameCmd(d string) (string, error) {


@ -42,7 +42,7 @@ import (
var buildRoot = path.Join(vmpath.GuestPersistentDir, "build") var buildRoot = path.Join(vmpath.GuestPersistentDir, "build")
// BuildImage builds image to all profiles // BuildImage builds image to all profiles
func BuildImage(path string, file string, tag string, push bool, env []string, opt []string, profiles []*config.Profile, allNodes bool, nodeName string) error { func BuildImage(srcPath string, file string, tag string, push bool, env []string, opt []string, profiles []*config.Profile, allNodes bool, nodeName string) error {
api, err := NewAPIClient() api, err := NewAPIClient()
if err != nil { if err != nil {
return errors.Wrap(err, "api") return errors.Wrap(err, "api")
@ -52,12 +52,12 @@ func BuildImage(path string, file string, tag string, push bool, env []string, o
succeeded := []string{} succeeded := []string{}
failed := []string{} failed := []string{}
u, err := url.Parse(path) u, err := url.Parse(srcPath)
if err == nil && u.Scheme == "file" { if err == nil && u.Scheme == "file" {
path = u.Path srcPath = u.Path
} }
remote := err == nil && u.Scheme != "" remote := err == nil && u.Scheme != ""
if runtime.GOOS == "windows" && filepath.VolumeName(path) != "" { if runtime.GOOS == "windows" && filepath.VolumeName(srcPath) != "" {
remote = false remote = false
} }
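The remote/local decision above hinges on url.Parse: anything with a scheme is treated as a remote build context, but on Windows a drive-letter path such as C:\Users\me\ctx typically parses with scheme "c", so the extra filepath.VolumeName check forces those back to local. A small sketch of the gotcha; the sample paths are invented:

package main

import (
	"fmt"
	"net/url"
	"path/filepath"
)

func main() {
	paths := []string{
		"https://github.com/kubernetes/minikube.git", // genuinely remote
		`C:\Users\me\build-context`,                  // Windows path: scheme looks like "c"
		"./build-context",                            // plainly local: no scheme
	}
	for _, p := range paths {
		scheme := ""
		if u, err := url.Parse(p); err == nil {
			scheme = u.Scheme
		}
		// On Windows, filepath.VolumeName reports "C:" for the second path,
		// which is the signal used above to override remote back to false
		// (it returns "" on other platforms).
		fmt.Printf("%-45s scheme=%q volume=%q\n", p, scheme, filepath.VolumeName(p))
	}
}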
@ -116,9 +116,9 @@ func BuildImage(path string, file string, tag string, push bool, env []string, o
return err return err
} }
if remote { if remote {
err = buildImage(cr, c.KubernetesConfig, path, file, tag, push, env, opt) err = buildImage(cr, c.KubernetesConfig, srcPath, file, tag, push, env, opt)
} else { } else {
err = transferAndBuildImage(cr, c.KubernetesConfig, path, file, tag, push, env, opt) err = transferAndBuildImage(cr, c.KubernetesConfig, srcPath, file, tag, push, env, opt)
} }
if err != nil { if err != nil {
failed = append(failed, m) failed = append(failed, m)


@ -73,19 +73,19 @@ func CacheImagesForBootstrapper(imageRepository, version string) error {
} }
// LoadCachedImages loads previously cached images into the container runtime // LoadCachedImages loads previously cached images into the container runtime
func LoadCachedImages(cc *config.ClusterConfig, runner command.Runner, images []string, cacheDir string, overwrite bool) error { func LoadCachedImages(cc *config.ClusterConfig, runner command.Runner, imgs []string, cacheDir string, overwrite bool) error {
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: runner}) cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil { if err != nil {
return errors.Wrap(err, "runtime") return errors.Wrap(err, "runtime")
} }
// Skip loading images if images already exist // Skip loading images if images already exist
if !overwrite && cr.ImagesPreloaded(images) { if !overwrite && cr.ImagesPreloaded(imgs) {
klog.Infof("Images are preloaded, skipping loading") klog.Infof("Images are preloaded, skipping loading")
return nil return nil
} }
klog.Infof("LoadCachedImages start: %s", images) klog.Infof("LoadCachedImages start: %s", imgs)
start := time.Now() start := time.Now()
defer func() { defer func() {
@ -102,19 +102,19 @@ func LoadCachedImages(cc *config.ClusterConfig, runner command.Runner, images []
} }
} }
for _, image := range images { for _, img := range imgs {
image := image img := img
g.Go(func() error { g.Go(func() error {
// Put a ten second limit on deciding if an image needs transfer // Put a ten second limit on deciding if an image needs transfer
// because it takes much less than that time to just transfer the image. // because it takes much less than that time to just transfer the image.
// This is needed because if running in offline mode, we can spend minutes here // This is needed because if running in offline mode, we can spend minutes here
// waiting for i/o timeout. // waiting for i/o timeout.
err := timedNeedsTransfer(imgClient, image, cr, 10*time.Second) err := timedNeedsTransfer(imgClient, img, cr, 10*time.Second)
if err == nil { if err == nil {
return nil return nil
} }
klog.Infof("%q needs transfer: %v", image, err) klog.Infof("%q needs transfer: %v", img, err)
return transferAndLoadCachedImage(runner, cc.KubernetesConfig, image, cacheDir) return transferAndLoadCachedImage(runner, cc.KubernetesConfig, img, cacheDir)
}) })
} }
if err := g.Wait(); err != nil { if err := g.Wait(); err != nil {
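The img := img line is the per-iteration copy that each g.Go closure needs under pre-Go 1.22 loop-variable semantics; without it, every goroutine would observe whatever value the shared loop variable held when it ran (from Go 1.22 on the copy is redundant but harmless). A self-contained sketch of the pattern with errgroup; the image names and the appended slice are illustrative only:

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	imgs := []string{"kube-apiserver", "kube-scheduler", "etcd"}

	var (
		mu     sync.Mutex
		loaded []string
	)

	var g errgroup.Group
	for _, img := range imgs {
		img := img // per-iteration copy so each goroutine captures its own image
		g.Go(func() error {
			// stand-in for timedNeedsTransfer / transferAndLoadCachedImage
			mu.Lock()
			loaded = append(loaded, img)
			mu.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("loading cached images:", err)
		return
	}
	fmt.Println("loaded:", loaded)
}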
@ -172,10 +172,10 @@ func needsTransfer(imgClient *client.Client, imgName string, cr cruntime.Manager
// LoadLocalImages loads images into the container runtime // LoadLocalImages loads images into the container runtime
func LoadLocalImages(cc *config.ClusterConfig, runner command.Runner, images []string) error { func LoadLocalImages(cc *config.ClusterConfig, runner command.Runner, images []string) error {
var g errgroup.Group var g errgroup.Group
for _, image := range images { for _, img := range images {
image := image img := img
g.Go(func() error { g.Go(func() error {
return transferAndLoadImage(runner, cc.KubernetesConfig, image, image) return transferAndLoadImage(runner, cc.KubernetesConfig, img, img)
}) })
} }
if err := g.Wait(); err != nil { if err := g.Wait(); err != nil {
@ -353,10 +353,10 @@ func SaveCachedImages(cc *config.ClusterConfig, runner command.Runner, images []
var g errgroup.Group var g errgroup.Group
for _, image := range images { for _, img := range images {
image := image img := img
g.Go(func() error { g.Go(func() error {
return transferAndSaveCachedImage(runner, cc.KubernetesConfig, image, cacheDir) return transferAndSaveCachedImage(runner, cc.KubernetesConfig, img, cacheDir)
}) })
} }
if err := g.Wait(); err != nil { if err := g.Wait(); err != nil {
@ -369,10 +369,10 @@ func SaveCachedImages(cc *config.ClusterConfig, runner command.Runner, images []
// SaveLocalImages saves images from the container runtime // SaveLocalImages saves images from the container runtime
func SaveLocalImages(cc *config.ClusterConfig, runner command.Runner, images []string, output string) error { func SaveLocalImages(cc *config.ClusterConfig, runner command.Runner, images []string, output string) error {
var g errgroup.Group var g errgroup.Group
for _, image := range images { for _, img := range images {
image := image img := img
g.Go(func() error { g.Go(func() error {
return transferAndSaveImage(runner, cc.KubernetesConfig, output, image) return transferAndSaveImage(runner, cc.KubernetesConfig, output, img)
}) })
} }
if err := g.Wait(); err != nil { if err := g.Wait(); err != nil {
@ -527,8 +527,8 @@ func transferAndSaveImage(cr command.Runner, k8s config.KubernetesConfig, dst st
} }
// pullImages pulls images to the container run time // pullImages pulls images to the container run time
func pullImages(cruntime cruntime.Manager, images []string) error { func pullImages(crMgr cruntime.Manager, imgs []string) error {
klog.Infof("pullImages start: %s", images) klog.Infof("pullImages start: %s", imgs)
start := time.Now() start := time.Now()
defer func() { defer func() {
@ -537,10 +537,10 @@ func pullImages(cruntime cruntime.Manager, images []string) error {
var g errgroup.Group var g errgroup.Group
for _, image := range images { for _, img := range imgs {
image := image img := img
g.Go(func() error { g.Go(func() error {
return cruntime.PullImage(image) return crMgr.PullImage(img)
}) })
} }
if err := g.Wait(); err != nil { if err := g.Wait(); err != nil {
@ -588,11 +588,11 @@ func PullImages(images []string, profile *config.Profile) error {
if err != nil { if err != nil {
return err return err
} }
cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil { if err != nil {
return errors.Wrap(err, "error creating container runtime") return errors.Wrap(err, "error creating container runtime")
} }
err = pullImages(cruntime, images) err = pullImages(crMgr, images)
if err != nil { if err != nil {
failed = append(failed, m) failed = append(failed, m)
klog.Warningf("Failed to pull images for profile %s %v", pName, err.Error()) klog.Warningf("Failed to pull images for profile %s %v", pName, err.Error())
@ -608,8 +608,8 @@ func PullImages(images []string, profile *config.Profile) error {
} }
// removeImages removes images from the container run time // removeImages removes images from the container run time
func removeImages(cruntime cruntime.Manager, images []string) error { func removeImages(crMgr cruntime.Manager, imgs []string) error {
klog.Infof("removeImages start: %s", images) klog.Infof("removeImages start: %s", imgs)
start := time.Now() start := time.Now()
defer func() { defer func() {
@ -618,10 +618,10 @@ func removeImages(cruntime cruntime.Manager, images []string) error {
var g errgroup.Group var g errgroup.Group
for _, image := range images { for _, img := range imgs {
image := image img := img
g.Go(func() error { g.Go(func() error {
return cruntime.RemoveImage(image) return crMgr.RemoveImage(img)
}) })
} }
if err := g.Wait(); err != nil { if err := g.Wait(); err != nil {
@ -669,11 +669,11 @@ func RemoveImages(images []string, profile *config.Profile) error {
if err != nil { if err != nil {
return err return err
} }
cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil { if err != nil {
return errors.Wrap(err, "error creating container runtime") return errors.Wrap(err, "error creating container runtime")
} }
err = removeImages(cruntime, images) err = removeImages(crMgr, images)
if err != nil { if err != nil {
failed = append(failed, m) failed = append(failed, m)
klog.Warningf("Failed to remove images for profile %s %v", pName, err.Error()) klog.Warningf("Failed to remove images for profile %s %v", pName, err.Error())
@ -757,19 +757,19 @@ func ListImages(profile *config.Profile, format string) error {
} }
renderImagesTable(data) renderImagesTable(data)
case "json": case "json":
json, err := json.Marshal(uniqueImages) jsondata, err := json.Marshal(uniqueImages)
if err != nil { if err != nil {
klog.Warningf("Error marshalling images list: %v", err.Error()) klog.Warningf("Error marshalling images list: %v", err.Error())
return nil return nil
} }
fmt.Printf("%s\n", json) fmt.Printf("%s\n", jsondata)
case "yaml": case "yaml":
yaml, err := yaml.Marshal(uniqueImages) yamldata, err := yaml.Marshal(uniqueImages)
if err != nil { if err != nil {
klog.Warningf("Error marshalling images list: %v", err.Error()) klog.Warningf("Error marshalling images list: %v", err.Error())
return nil return nil
} }
fmt.Printf("%s\n", yaml) fmt.Printf("%s\n", yamldata)
default: default:
res := []string{} res := []string{}
for _, item := range uniqueImages { for _, item := range uniqueImages {
@ -892,11 +892,11 @@ func TagImage(profile *config.Profile, source string, target string) error {
if err != nil { if err != nil {
return err return err
} }
cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil { if err != nil {
return errors.Wrap(err, "error creating container runtime") return errors.Wrap(err, "error creating container runtime")
} }
err = cruntime.TagImage(source, target) err = crMgr.TagImage(source, target)
if err != nil { if err != nil {
failed = append(failed, m) failed = append(failed, m)
klog.Warningf("Failed to tag image for profile %s %v", pName, err.Error()) klog.Warningf("Failed to tag image for profile %s %v", pName, err.Error())
@ -912,8 +912,8 @@ func TagImage(profile *config.Profile, source string, target string) error {
} }
// pushImages pushes images from the container run time // pushImages pushes images from the container run time
func pushImages(cruntime cruntime.Manager, images []string) error { func pushImages(crMgr cruntime.Manager, imgs []string) error {
klog.Infof("pushImages start: %s", images) klog.Infof("pushImages start: %s", imgs)
start := time.Now() start := time.Now()
defer func() { defer func() {
@ -922,10 +922,10 @@ func pushImages(cruntime cruntime.Manager, images []string) error {
var g errgroup.Group var g errgroup.Group
for _, image := range images { for _, img := range imgs {
image := image img := img
g.Go(func() error { g.Go(func() error {
return cruntime.PushImage(image) return crMgr.PushImage(img)
}) })
} }
if err := g.Wait(); err != nil { if err := g.Wait(); err != nil {
@ -973,11 +973,11 @@ func PushImages(images []string, profile *config.Profile) error {
if err != nil { if err != nil {
return err return err
} }
cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner}) crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil { if err != nil {
return errors.Wrap(err, "error creating container runtime") return errors.Wrap(err, "error creating container runtime")
} }
err = pushImages(cruntime, images) err = pushImages(crMgr, images)
if err != nil { if err != nil {
failed = append(failed, m) failed = append(failed, m)
klog.Warningf("Failed to push image for profile %s %v", pName, err.Error()) klog.Warningf("Failed to push image for profile %s %v", pName, err.Error())

View File

@ -37,7 +37,7 @@ import (
// deleteOrphanedKIC attempts to delete an orphaned docker instance for machines without a config file // deleteOrphanedKIC attempts to delete an orphaned docker instance for machines without a config file
// used as last effort clean up not returning errors, won't warn user. // used as last effort clean up not returning errors, won't warn user.
func deleteOrphanedKIC(ociBin string, name string) { func deleteOrphanedKIC(ociBin string, name string) {
if !(ociBin == oci.Podman || ociBin == oci.Docker) { if ociBin != oci.Podman && ociBin != oci.Docker {
return return
} }
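The QF1001 rewrite above replaces a negated disjunction with the equivalent conjunction. A small sketch of that De Morgan transformation, using plain string literals in place of the oci constants:

package main

import "fmt"

// shouldSkip shows the De Morgan rewrite used in deleteOrphanedKIC:
// before: !(ociBin == "podman" || ociBin == "docker")
// after:  ociBin != "podman" && ociBin != "docker"
func shouldSkip(ociBin string) bool {
	return ociBin != "podman" && ociBin != "docker"
}

func main() {
	fmt.Println(shouldSkip("docker")) // false: a supported binary, keep going
	fmt.Println(shouldSkip("crio"))   // true: anything else is skipped
}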
@ -68,8 +68,8 @@ func DeleteHost(api libmachine.API, machineName string, deleteAbandoned ...bool)
delAbandoned = deleteAbandoned[0] delAbandoned = deleteAbandoned[0]
} }
host, err := api.Load(machineName) hostInfo, err := api.Load(machineName)
if err != nil && host == nil && delAbandoned { if err != nil && hostInfo == nil && delAbandoned {
deleteOrphanedKIC(oci.Docker, machineName) deleteOrphanedKIC(oci.Docker, machineName)
deleteOrphanedKIC(oci.Podman, machineName) deleteOrphanedKIC(oci.Podman, machineName)
// Keep going even if minikube does not know about the host // Keep going even if minikube does not know about the host
@ -88,7 +88,7 @@ func DeleteHost(api libmachine.API, machineName string, deleteAbandoned ...bool)
} }
// some drivers need manual shut down before delete to avoid getting stuck. // some drivers need manual shut down before delete to avoid getting stuck.
if driver.NeedsShutdown(host.Driver.DriverName()) { if driver.NeedsShutdown(hostInfo.Driver.DriverName()) {
if err := StopHost(api, machineName); err != nil { if err := StopHost(api, machineName); err != nil {
klog.Warningf("stop host: %v", err) klog.Warningf("stop host: %v", err)
} }
@ -96,8 +96,8 @@ func DeleteHost(api libmachine.API, machineName string, deleteAbandoned ...bool)
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
} }
out.Step(style.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName}) out.Step(style.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": hostInfo.DriverName})
return deleteHost(api, host, machineName) return deleteHost(api, hostInfo, machineName)
} }
// delete removes a host and its local data files // delete removes a host and its local data files

View File

@ -209,12 +209,12 @@ func ensureSyncedGuestClock(h hostRunner, drv string) error {
// guestClockDelta returns the approximate difference between the host and guest system clock // guestClockDelta returns the approximate difference between the host and guest system clock
// NOTE: This does not currently take into account ssh latency. // NOTE: This does not currently take into account ssh latency.
func guestClockDelta(h hostRunner, local time.Time) (time.Duration, error) { func guestClockDelta(h hostRunner, local time.Time) (time.Duration, error) {
out, err := h.RunSSHCommand("date +%s.%N") rest, err := h.RunSSHCommand("date +%s.%N")
if err != nil { if err != nil {
return 0, errors.Wrap(err, "get clock") return 0, errors.Wrap(err, "get clock")
} }
klog.Infof("guest clock: %s", out) klog.Infof("guest clock: %s", rest)
ns := strings.Split(strings.TrimSpace(out), ".") ns := strings.Split(strings.TrimSpace(rest), ".")
secs, err := strconv.ParseInt(strings.TrimSpace(ns[0]), 10, 64) secs, err := strconv.ParseInt(strings.TrimSpace(ns[0]), 10, 64)
if err != nil { if err != nil {
return 0, errors.Wrap(err, "atoi") return 0, errors.Wrap(err, "atoi")
@ -232,8 +232,8 @@ func guestClockDelta(h hostRunner, local time.Time) (time.Duration, error) {
// adjustGuestClock adjusts the guest system clock to be nearer to the host system clock // adjustGuestClock adjusts the guest system clock to be nearer to the host system clock
func adjustGuestClock(h hostRunner, t time.Time) error { func adjustGuestClock(h hostRunner, t time.Time) error {
out, err := h.RunSSHCommand(fmt.Sprintf("sudo date -s @%d", t.Unix())) rest, err := h.RunSSHCommand(fmt.Sprintf("sudo date -s @%d", t.Unix()))
klog.Infof("clock set: %s (err=%v)", out, err) klog.Infof("clock set: %s (err=%v)", rest, err)
return err return err
} }
@ -253,10 +253,12 @@ func machineExistsMessage(s state.State, err error, msg string) (bool, error) {
} }
func machineExistsDocker(s state.State, err error) (bool, error) { func machineExistsDocker(s state.State, err error) (bool, error) {
if s == state.Error {
switch s {
case state.Error:
// if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return // if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return
return false, constants.ErrMachineMissing return false, constants.ErrMachineMissing
} else if s == state.None { case state.None:
// if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return // if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return
return false, constants.ErrMachineMissing return false, constants.ErrMachineMissing
} }
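The QF1003 fix above turns an if/else-if chain over a single value into a tagged switch. A standalone sketch of the same shape (the state values are stand-ins for the libmachine state package):

package main

import (
	"errors"
	"fmt"
)

type state int

const (
	None state = iota
	Running
	Error
)

var errMachineMissing = errors.New("machine missing")

// machineExists mirrors the tagged-switch form: one switch over s instead of
// `if s == Error { ... } else if s == None { ... }`.
func machineExists(s state) (bool, error) {
	switch s {
	case Error, None:
		return false, errMachineMissing
	default:
		return true, nil
	}
}

func main() {
	ok, err := machineExists(Error)
	fmt.Println(ok, err)
}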

View File

@ -35,12 +35,12 @@ func Status(api libmachine.API, machineName string) (string, error) {
return state.None.String(), nil return state.None.String(), nil
} }
host, err := api.Load(machineName) hostInfo, err := api.Load(machineName)
if err != nil { if err != nil {
return "", errors.Wrapf(err, "load") return "", errors.Wrapf(err, "load")
} }
s, err := host.Driver.GetState() s, err := hostInfo.Driver.GetState()
if err != nil { if err != nil {
return "", errors.Wrap(err, "state") return "", errors.Wrap(err, "state")
} }

View File

@ -193,11 +193,11 @@ func cachedCPUInfo() ([]cpu.InfoStat, error) {
} }
// ParseMemFree parses the output of the `free -m` command // ParseMemFree parses the output of the `free -m` command
func parseMemFree(out string) (int64, error) { func parseMemFree(s string) (int64, error) {
// total used free shared buff/cache available // total used free shared buff/cache available
//Mem: 1987 706 194 1 1086 1173 //Mem: 1987 706 194 1 1086 1173
//Swap: 0 0 0 //Swap: 0 0 0
outlines := strings.Split(out, "\n") outlines := strings.Split(s, "\n")
l := len(outlines) l := len(outlines)
for _, line := range outlines[1 : l-1] { for _, line := range outlines[1 : l-1] {
parsedLine := strings.Fields(line) parsedLine := strings.Fields(line)
@ -217,10 +217,10 @@ func parseMemFree(out string) (int64, error) {
} }
// ParseDiskFree parses the output of the `df -m` command // ParseDiskFree parses the output of the `df -m` command
func parseDiskFree(out string) (int64, error) { func parseDiskFree(s string) (int64, error) {
// Filesystem 1M-blocks Used Available Use% Mounted on // Filesystem 1M-blocks Used Available Use% Mounted on
// /dev/sda1 39643 3705 35922 10% / // /dev/sda1 39643 3705 35922 10% /
outlines := strings.Split(out, "\n") outlines := strings.Split(s, "\n")
l := len(outlines) l := len(outlines)
for _, line := range outlines[1 : l-1] { for _, line := range outlines[1 : l-1] {
parsedLine := strings.Fields(line) parsedLine := strings.Fields(line)

View File

@ -31,12 +31,12 @@ import (
// GetHost find node's host information by name in the given cluster. // GetHost find node's host information by name in the given cluster.
func GetHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) { func GetHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) {
machineName := config.MachineName(cc, n) machineName := config.MachineName(cc, n)
host, err := LoadHost(api, machineName) hostInfo, err := LoadHost(api, machineName)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "host exists and load") return nil, errors.Wrap(err, "host exists and load")
} }
currentState, err := host.Driver.GetState() currentState, err := hostInfo.Driver.GetState()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "state") return nil, errors.Wrap(err, "state")
} }
@ -45,12 +45,12 @@ func GetHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
return nil, errors.Errorf("%q is not running", machineName) return nil, errors.Errorf("%q is not running", machineName)
} }
return host, nil return hostInfo, nil
} }
// CreateSSHShell creates a new SSH shell / client // CreateSSHShell creates a new SSH shell / client
func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node, args []string, native bool) error { func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node, args []string, native bool) error {
host, err := GetHost(api, cc, n) hostInfo, err := GetHost(api, cc, n)
if err != nil { if err != nil {
return err return err
} }
@ -61,7 +61,7 @@ func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node,
ssh.SetDefaultClient(ssh.External) ssh.SetDefaultClient(ssh.External)
} }
client, err := host.CreateSSHClient() client, err := hostInfo.CreateSSHClient()
if err != nil { if err != nil {
return errors.Wrap(err, "Creating ssh client") return errors.Wrap(err, "Creating ssh client")
@ -71,16 +71,16 @@ func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node,
// GetSSHHostAddrPort returns the host address and port for ssh // GetSSHHostAddrPort returns the host address and port for ssh
func GetSSHHostAddrPort(api libmachine.API, cc config.ClusterConfig, n config.Node) (string, int, error) { func GetSSHHostAddrPort(api libmachine.API, cc config.ClusterConfig, n config.Node) (string, int, error) {
host, err := GetHost(api, cc, n) hostInfo, err := GetHost(api, cc, n)
if err != nil { if err != nil {
return "", 0, err return "", 0, err
} }
addr, err := host.Driver.GetSSHHostname() addr, err := hostInfo.Driver.GetSSHHostname()
if err != nil { if err != nil {
return "", 0, err return "", 0, err
} }
port, err := host.Driver.GetSSHPort() port, err := hostInfo.Driver.GetSSHPort()
if err != nil { if err != nil {
return "", 0, err return "", 0, err
} }

View File

@ -412,7 +412,7 @@ func AddHostAlias(c command.Runner, name string, ip net.IP) error {
return nil return nil
} }
func addHostAliasCommand(name string, record string, sudo bool, path string) *exec.Cmd { func addHostAliasCommand(name string, record string, sudo bool, destPath string) *exec.Cmd {
sudoCmd := "sudo" sudoCmd := "sudo"
if !sudo { // for testing if !sudo { // for testing
sudoCmd = "" sudoCmd = ""
@ -421,9 +421,9 @@ func addHostAliasCommand(name string, record string, sudo bool, path string) *ex
script := fmt.Sprintf( script := fmt.Sprintf(
`{ grep -v $'\t%s$' "%s"; echo "%s"; } > /tmp/h.$$; %s cp /tmp/h.$$ "%s"`, `{ grep -v $'\t%s$' "%s"; echo "%s"; } > /tmp/h.$$; %s cp /tmp/h.$$ "%s"`,
name, name,
path, destPath,
record, record,
sudoCmd, sudoCmd,
path) destPath)
return exec.Command("/bin/bash", "-c", script) return exec.Command("/bin/bash", "-c", script)
} }

View File

@ -95,9 +95,9 @@ func trySSHPowerOff(h *host.Host) error {
err := oci.ShutDown(h.DriverName, h.Name) err := oci.ShutDown(h.DriverName, h.Name)
klog.Infof("shutdown container: err=%v", err) klog.Infof("shutdown container: err=%v", err)
} else { } else {
out, err := h.RunSSHCommand("sudo poweroff") rest, err := h.RunSSHCommand("sudo poweroff")
// poweroff always results in an error, since the host disconnects. // poweroff always results in an error, since the host disconnects.
klog.Infof("poweroff result: out=%s, err=%v", out, err) klog.Infof("poweroff result: out=%s, err=%v", rest, err)
} }
return nil return nil
} }

View File

@ -132,7 +132,7 @@ func running(name string, first bool) []ClusterController {
continue continue
} }
host, err := machine.LoadHost(api, machineName) hostInfo, err := machine.LoadHost(api, machineName)
if err != nil { if err != nil {
if last { if last {
exit.Message(reason.GuestLoadHost, `Unable to load control-plane node {{.name}} host: {{.err}}`, out.V{"name": machineName, "err": err}) exit.Message(reason.GuestLoadHost, `Unable to load control-plane node {{.name}} host: {{.err}}`, out.V{"name": machineName, "err": err})
@ -141,7 +141,7 @@ func running(name string, first bool) []ClusterController {
continue continue
} }
cr, err := machine.CommandRunner(host) cr, err := machine.CommandRunner(hostInfo)
if err != nil { if err != nil {
if last { if last {
exit.Message(reason.InternalCommandRunner, `Unable to get control-plane node {{.name}} host command runner: {{.err}}`, out.V{"name": machineName, "err": err}) exit.Message(reason.InternalCommandRunner, `Unable to get control-plane node {{.name}} host command runner: {{.err}}`, out.V{"name": machineName, "err": err})
@ -150,7 +150,7 @@ func running(name string, first bool) []ClusterController {
continue continue
} }
hostname, ip, port, err := driver.ControlPlaneEndpoint(cc, &cp, host.DriverName) hostname, ip, port, err := driver.ControlPlaneEndpoint(cc, &cp, hostInfo.DriverName)
if err != nil { if err != nil {
if last { if last {
exit.Message(reason.DrvCPEndpoint, `Unable to get control-plane node {{.name}} endpoint: {{.err}}`, out.V{"name": machineName, "err": err}) exit.Message(reason.DrvCPEndpoint, `Unable to get control-plane node {{.name}} endpoint: {{.err}}`, out.V{"name": machineName, "err": err})
@ -164,7 +164,7 @@ func running(name string, first bool) []ClusterController {
Config: cc, Config: cc,
CP: ControlPlane{ CP: ControlPlane{
Runner: cr, Runner: cr,
Host: host, Host: hostInfo,
Node: &cp, Node: &cp,
Hostname: hostname, Hostname: hostname,
IP: ip, IP: ip,
@ -223,8 +223,8 @@ func Healthy(name string) ClusterController {
// exitTip returns an action tip and exits // exitTip returns an action tip and exits
func exitTip(action string, profile string, code int) { func exitTip(action string, profile string, code int) {
command := ExampleCmd(profile, action) cmd := ExampleCmd(profile, action)
out.Styled(style.Workaround, `To start a cluster, run: "{{.command}}"`, out.V{"command": command}) out.Styled(style.Workaround, `To start a cluster, run: "{{.command}}"`, out.V{"command": cmd})
exit.Code(code) exit.Code(code)
} }

View File

@ -282,12 +282,12 @@ func imagesInConfigFile() ([]string, error) {
} }
func updateKicImageRepo(imgName string, repo string) string { func updateKicImageRepo(imgName string, repo string) string {
image := strings.TrimPrefix(imgName, "gcr.io/") imageName := strings.TrimPrefix(imgName, "gcr.io/")
if repo == constants.AliyunMirror { if repo == constants.AliyunMirror {
// for aliyun registry must strip namespace from image name, e.g. // for aliyun registry must strip namespace from image name, e.g.
// registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-minikube/kicbase:v0.0.25 will not work // registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-minikube/kicbase:v0.0.25 will not work
// registry.cn-hangzhou.aliyuncs.com/google_containers/kicbase:v0.0.25 does work // registry.cn-hangzhou.aliyuncs.com/google_containers/kicbase:v0.0.25 does work
image = strings.TrimPrefix(image, "k8s-minikube/") imageName = strings.TrimPrefix(imageName, "k8s-minikube/")
} }
return path.Join(repo, image) return path.Join(repo, imageName)
} }

View File

@ -522,8 +522,8 @@ func cgroupDriver(cc config.ClusterConfig) string {
return detect.CgroupDriver() return detect.CgroupDriver()
} }
func pathExists(runner cruntime.CommandRunner, path string) (bool, error) { func pathExists(runner cruntime.CommandRunner, p string) (bool, error) {
_, err := runner.RunCmd(exec.Command("stat", path)) _, err := runner.RunCmd(exec.Command("stat", p))
if err == nil { if err == nil {
return true, nil return true, nil
} }
@ -624,18 +624,18 @@ func setupKubeadm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node,
// setupKubeconfig generates kubeconfig. // setupKubeconfig generates kubeconfig.
func setupKubeconfig(h host.Host, cc config.ClusterConfig, n config.Node, clusterName string) *kubeconfig.Settings { func setupKubeconfig(h host.Host, cc config.ClusterConfig, n config.Node, clusterName string) *kubeconfig.Settings {
host := cc.KubernetesConfig.APIServerHAVIP hostIP := cc.KubernetesConfig.APIServerHAVIP
port := cc.APIServerPort port := cc.APIServerPort
if !config.IsHA(cc) || driver.NeedsPortForward(cc.Driver) { if !config.IsHA(cc) || driver.NeedsPortForward(cc.Driver) {
var err error var err error
if host, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, h.DriverName); err != nil { if hostIP, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, h.DriverName); err != nil {
exit.Message(reason.DrvCPEndpoint, fmt.Sprintf("failed to construct cluster server address: %v", err), out.V{"profileArg": fmt.Sprintf("--profile=%s", clusterName)}) exit.Message(reason.DrvCPEndpoint, fmt.Sprintf("failed to construct cluster server address: %v", err), out.V{"profileArg": fmt.Sprintf("--profile=%s", clusterName)})
} }
} }
addr := fmt.Sprintf("https://%s", net.JoinHostPort(host, strconv.Itoa(port))) addr := fmt.Sprintf("https://%s", net.JoinHostPort(hostIP, strconv.Itoa(port)))
if cc.KubernetesConfig.APIServerName != constants.APIServerName { if cc.KubernetesConfig.APIServerName != constants.APIServerName {
addr = strings.ReplaceAll(addr, host, cc.KubernetesConfig.APIServerName) addr = strings.ReplaceAll(addr, hostIP, cc.KubernetesConfig.APIServerName)
} }
kcs := &kubeconfig.Settings{ kcs := &kubeconfig.Settings{
@ -654,29 +654,29 @@ func setupKubeconfig(h host.Host, cc config.ClusterConfig, n config.Node, cluste
} }
// StartMachine starts a VM // StartMachine starts a VM
func startMachine(cfg *config.ClusterConfig, node *config.Node, delOnFail bool) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host, err error) { func startMachine(cfg *config.ClusterConfig, node *config.Node, delOnFail bool) (runner command.Runner, preExists bool, machineAPI libmachine.API, hostInfo *host.Host, err error) {
m, err := machine.NewAPIClient() m, err := machine.NewAPIClient()
if err != nil { if err != nil {
return runner, preExists, m, host, errors.Wrap(err, "Failed to get machine client") return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to get machine client")
} }
host, preExists, err = startHostInternal(m, cfg, node, delOnFail) hostInfo, preExists, err = startHostInternal(m, cfg, node, delOnFail)
if err != nil { if err != nil {
return runner, preExists, m, host, errors.Wrap(err, "Failed to start host") return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to start host")
} }
runner, err = machine.CommandRunner(host) runner, err = machine.CommandRunner(hostInfo)
if err != nil { if err != nil {
return runner, preExists, m, host, errors.Wrap(err, "Failed to get command runner") return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to get command runner")
} }
ip, err := validateNetwork(host, runner, cfg.KubernetesConfig.ImageRepository) ip, err := validateNetwork(hostInfo, runner, cfg.KubernetesConfig.ImageRepository)
if err != nil { if err != nil {
return runner, preExists, m, host, errors.Wrap(err, "Failed to validate network") return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to validate network")
} }
if driver.IsQEMU(host.Driver.DriverName()) && network.IsBuiltinQEMU(cfg.Network) { if driver.IsQEMU(hostInfo.Driver.DriverName()) && network.IsBuiltinQEMU(cfg.Network) {
apiServerPort, err := getPort() apiServerPort, err := getPort()
if err != nil { if err != nil {
return runner, preExists, m, host, errors.Wrap(err, "Failed to find apiserver port") return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to find apiserver port")
} }
cfg.APIServerPort = apiServerPort cfg.APIServerPort = apiServerPort
} }
@ -687,7 +687,7 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node, delOnFail bool)
out.FailureT("Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip}) out.FailureT("Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip})
} }
return runner, preExists, m, host, err return runner, preExists, m, hostInfo, err
} }
// getPort asks the kernel for a free open port that is ready to use // getPort asks the kernel for a free open port that is ready to use
@ -707,9 +707,9 @@ func getPort() (int, error) {
// startHostInternal starts a new minikube host using a VM or None // startHostInternal starts a new minikube host using a VM or None
func startHostInternal(api libmachine.API, cc *config.ClusterConfig, n *config.Node, delOnFail bool) (*host.Host, bool, error) { func startHostInternal(api libmachine.API, cc *config.ClusterConfig, n *config.Node, delOnFail bool) (*host.Host, bool, error) {
host, exists, err := machine.StartHost(api, cc, n) hostInfo, exists, err := machine.StartHost(api, cc, n)
if err == nil { if err == nil {
return host, exists, nil return hostInfo, exists, nil
} }
klog.Warningf("error starting host: %v", err) klog.Warningf("error starting host: %v", err)
// NOTE: People get very cranky if you delete their preexisting VM. Only delete new ones. // NOTE: People get very cranky if you delete their preexisting VM. Only delete new ones.
@ -722,7 +722,7 @@ func startHostInternal(api libmachine.API, cc *config.ClusterConfig, n *config.N
if err, ff := errors.Cause(err).(*oci.FailFastError); ff { if err, ff := errors.Cause(err).(*oci.FailFastError); ff {
klog.Infof("will skip retrying to create machine because error is not retriable: %v", err) klog.Infof("will skip retrying to create machine because error is not retriable: %v", err)
return host, exists, err return hostInfo, exists, err
} }
out.ErrT(style.Embarrassed, "StartHost failed, but will try again: {{.error}}", out.V{"error": err}) out.ErrT(style.Embarrassed, "StartHost failed, but will try again: {{.error}}", out.V{"error": err})
@ -739,15 +739,15 @@ func startHostInternal(api libmachine.API, cc *config.ClusterConfig, n *config.N
} }
} }
host, exists, err = machine.StartHost(api, cc, n) hostInfo, exists, err = machine.StartHost(api, cc, n)
if err == nil { if err == nil {
return host, exists, nil return hostInfo, exists, nil
} }
// Don't use host.Driver to avoid nil pointer deref // Don't use host.Driver to avoid nil pointer deref
drv := cc.Driver drv := cc.Driver
out.ErrT(style.Sad, `Failed to start {{.driver}} {{.driver_type}}. Running "{{.cmd}}" may fix it: {{.error}}`, out.V{"driver": drv, "driver_type": driver.MachineType(drv), "cmd": mustload.ExampleCmd(cc.Name, "delete"), "error": err}) out.ErrT(style.Sad, `Failed to start {{.driver}} {{.driver_type}}. Running "{{.cmd}}" may fix it: {{.error}}`, out.V{"driver": drv, "driver_type": driver.MachineType(drv), "cmd": mustload.ExampleCmd(cc.Name, "delete"), "error": err})
return host, exists, err return hostInfo, exists, err
} }
// validateNetwork tries to catch network problems as soon as possible // validateNetwork tries to catch network problems as soon as possible
@ -760,7 +760,8 @@ func validateNetwork(h *host.Host, r command.Runner, imageRepository string) (st
optSeen := false optSeen := false
warnedOnce := false warnedOnce := false
for _, k := range proxy.EnvVars { for _, k := range proxy.EnvVars {
if v := os.Getenv(k); v != "" { v := os.Getenv(k)
if v != "" {
if !optSeen { if !optSeen {
out.Styled(style.Internet, "Found network options:") out.Styled(style.Internet, "Found network options:")
optSeen = true optSeen = true
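The change above pulls the os.Getenv call out of the if statement's init clause into its own statement. A minimal sketch of the resulting shape, with a hypothetical helper name:

package main

import (
	"fmt"
	"os"
)

// proxyValue shows the rewrite: the assignment becomes a separate statement
// rather than living in the if statement's init clause.
func proxyValue(key string) (string, bool) {
	v := os.Getenv(key)
	if v != "" {
		return v, true
	}
	return "", false
}

func main() {
	if v, ok := proxyValue("HTTPS_PROXY"); ok {
		fmt.Println("found proxy:", v)
	}
}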
@ -847,9 +848,9 @@ func tryRegistry(r command.Runner, driverName, imageRepository, ip string) {
// 2 second timeout. For best results, call tryRegistry in a non-blocking manner. // 2 second timeout. For best results, call tryRegistry in a non-blocking manner.
opts := []string{"-sS", "-m", "2"} opts := []string{"-sS", "-m", "2"}
proxy := os.Getenv("HTTPS_PROXY") httpsProxy := os.Getenv("HTTPS_PROXY")
if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") { if httpsProxy != "" && !strings.HasPrefix(httpsProxy, "localhost") && !strings.HasPrefix(httpsProxy, "127.0") {
opts = append([]string{"-x", proxy}, opts...) opts = append([]string{"-x", httpsProxy}, opts...)
} }
if imageRepository == "" { if imageRepository == "" {
@ -931,16 +932,16 @@ func addCoreDNSEntry(runner command.Runner, name, ip string, cc config.ClusterCo
// get current coredns configmap via kubectl // get current coredns configmap via kubectl
get := fmt.Sprintf("sudo %s --kubeconfig=%s -n kube-system get configmap coredns -o yaml", kubectl, kubecfg) get := fmt.Sprintf("sudo %s --kubeconfig=%s -n kube-system get configmap coredns -o yaml", kubectl, kubecfg)
out, err := runner.RunCmd(exec.Command("/bin/bash", "-c", get)) rest, err := runner.RunCmd(exec.Command("/bin/bash", "-c", get))
if err != nil { if err != nil {
klog.Errorf("failed to get current CoreDNS ConfigMap: %v", err) klog.Errorf("failed to get current CoreDNS ConfigMap: %v", err)
return err return err
} }
cm := strings.TrimSpace(out.Stdout.String()) cm := strings.TrimSpace(rest.Stdout.String())
// check if this specific host entry already exists in coredns configmap, so not to duplicate/override it // check if this specific host entry already exists in coredns configmap, so not to duplicate/override it
host := regexp.MustCompile(fmt.Sprintf(`(?smU)^ *hosts {.*%s.*}`, name)) hostInfo := regexp.MustCompile(fmt.Sprintf(`(?smU)^ *hosts {.*%s.*}`, name))
if host.MatchString(cm) { if hostInfo.MatchString(cm) {
klog.Infof("CoreDNS already contains %q host record, skipping...", name) klog.Infof("CoreDNS already contains %q host record, skipping...", name)
return nil return nil
} }
@ -956,8 +957,8 @@ func addCoreDNSEntry(runner command.Runner, name, ip string, cc config.ClusterCo
} }
// check if logging is already enabled (via log plugin) in coredns configmap, so not to duplicate it // check if logging is already enabled (via log plugin) in coredns configmap, so not to duplicate it
logs := regexp.MustCompile(`(?smU)^ *log *$`) regex := regexp.MustCompile(`(?smU)^ *log *$`)
if !logs.MatchString(cm) { if !regex.MatchString(cm) {
// inject log plugin into coredns configmap // inject log plugin into coredns configmap
sed = fmt.Sprintf("%s -e '/^ errors *$/i \\ log'", sed) sed = fmt.Sprintf("%s -e '/^ errors *$/i \\ log'", sed)
} }

View File

@ -96,21 +96,21 @@ func maybePrintBetaUpdateText(betaReleasesURL string, localVersion semver.Versio
return true return true
} }
func printUpdateTextCommon(version semver.Version) { func printUpdateTextCommon(ver semver.Version) {
if err := writeTimeToFile(lastUpdateCheckFilePath, time.Now().UTC()); err != nil { if err := writeTimeToFile(lastUpdateCheckFilePath, time.Now().UTC()); err != nil {
klog.Errorf("write time failed: %v", err) klog.Errorf("write time failed: %v", err)
} }
url := "https://github.com/kubernetes/minikube/releases/tag/v" + version.String() url := "https://github.com/kubernetes/minikube/releases/tag/v" + ver.String()
out.Styled(style.Celebrate, `minikube {{.version}} is available! Download it: {{.url}}`, out.V{"version": version, "url": url}) out.Styled(style.Celebrate, `minikube {{.version}} is available! Download it: {{.url}}`, out.V{"version": ver, "url": url})
} }
func printUpdateText(version semver.Version) { func printUpdateText(ver semver.Version) {
printUpdateTextCommon(version) printUpdateTextCommon(ver)
out.Styled(style.Tip, "To disable this notice, run: 'minikube config set WantUpdateNotification false'\n") out.Styled(style.Tip, "To disable this notice, run: 'minikube config set WantUpdateNotification false'\n")
} }
func printBetaUpdateText(version semver.Version) { func printBetaUpdateText(ver semver.Version) {
printUpdateTextCommon(version) printUpdateTextCommon(ver)
out.Styled(style.Tip, "To disable beta notices, run: 'minikube config set WantBetaUpdateNotification false'") out.Styled(style.Tip, "To disable beta notices, run: 'minikube config set WantBetaUpdateNotification false'")
out.Styled(style.Tip, "To disable update notices in general, run: 'minikube config set WantUpdateNotification false'\n") out.Styled(style.Tip, "To disable update notices in general, run: 'minikube config set WantUpdateNotification false'\n")
} }
@ -248,14 +248,14 @@ func timeFromFileIfExists(path string) time.Time {
} }
// DownloadURL returns a URL to get minikube binary version ver for platform os/arch // DownloadURL returns a URL to get minikube binary version ver for platform os/arch
func DownloadURL(ver, os, arch string) string { func DownloadURL(ver, osName, arch string) string {
if ver == "" || strings.HasSuffix(ver, "-unset") || os == "" || arch == "" { if ver == "" || strings.HasSuffix(ver, "-unset") || osName == "" || arch == "" {
return "https://github.com/kubernetes/minikube/releases" return "https://github.com/kubernetes/minikube/releases"
} }
sfx := "" sfx := ""
if os == "windows" { if osName == "windows" {
sfx = ".exe" sfx = ".exe"
} }
return fmt.Sprintf("https://github.com/kubernetes/minikube/releases/download/%s/minikube-%s-%s%s", return fmt.Sprintf("https://github.com/kubernetes/minikube/releases/download/%s/minikube-%s-%s%s",
ver, os, arch, sfx) ver, osName, arch, sfx)
} }

View File

@ -107,8 +107,8 @@ func Styled(st style.Enum, format string, a ...V) {
Infof(format, a...) Infof(format, a...)
return return
} }
outStyled, spinner := stylized(st, useColor, format, a...) outStyled, useSpinner := stylized(st, useColor, format, a...)
if spinner { if useSpinner {
spinnerString(outStyled) spinnerString(outStyled)
} else { } else {
String(outStyled) String(outStyled)
@ -116,12 +116,12 @@ func Styled(st style.Enum, format string, a ...V) {
} }
func boxedCommon(printFunc func(format string, a ...interface{}), cfg box.Config, title string, format string, a ...V) { func boxedCommon(printFunc func(format string, a ...interface{}), cfg box.Config, title string, format string, a ...V) {
box := box.New(cfg) b := box.New(cfg)
if !useColor { if !useColor {
box.Config.Color = nil b.Config.Color = nil
} }
str := Sprintf(style.None, format, a...) str := Sprintf(style.None, format, a...)
printFunc(box.String(title, strings.TrimSpace(str))) printFunc(b.String(title, strings.TrimSpace(str)))
} }
// Boxed writes a stylized and templated message in a box to stdout using the default style config // Boxed writes a stylized and templated message in a box to stdout using the default style config

View File

@ -51,13 +51,13 @@ func timeCommandLogs(cmd *exec.Cmd) (*result, error) {
var timings []float64 var timings []float64
for scanner.Scan() { for scanner.Scan() {
log := scanner.Text() logData := scanner.Text()
// this is the time it took to complete the previous log // this is the time it took to complete the previous log
timeTaken := time.Since(timer).Seconds() timeTaken := time.Since(timer).Seconds()
klog.Infof("%f: %s", timeTaken, log) klog.Infof("%f: %s", timeTaken, logData)
timer = time.Now() timer = time.Now()
logs = append(logs, log) logs = append(logs, logData)
timings = append(timings, timeTaken) timings = append(timings, timeTaken)
} }
// add the time it took to get from the final log to finishing the command // add the time it took to get from the final log to finishing the command

View File

@ -63,12 +63,12 @@ func CompareMinikubeStart(ctx context.Context, binaries []*Binary) error {
return nil return nil
} }
func collectResults(ctx context.Context, binaries []*Binary, driver string, runtime string) (*resultManager, error) { func collectResults(ctx context.Context, binaries []*Binary, driver string, runtimeName string) (*resultManager, error) {
rm := newResultManager() rm := newResultManager()
for run := 0; run < runs; run++ { for run := 0; run < runs; run++ {
log.Printf("Executing run %d/%d...", run+1, runs) log.Printf("Executing run %d/%d...", run+1, runs)
for _, binary := range binaries { for _, binary := range binaries {
r, err := timeMinikubeStart(ctx, binary, driver, runtime) r, err := timeMinikubeStart(ctx, binary, driver, runtimeName)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "timing run %d with %s", run, binary.Name()) return nil, errors.Wrapf(err, "timing run %d with %s", run, binary.Name())
} }
@ -97,9 +97,9 @@ func average(nums []float64) float64 {
return total / float64(len(nums)) return total / float64(len(nums))
} }
func downloadArtifacts(ctx context.Context, binaries []*Binary, driver string, runtime string) error { func downloadArtifacts(ctx context.Context, binaries []*Binary, driver string, runtimeName string) error {
for _, b := range binaries { for _, b := range binaries {
c := exec.CommandContext(ctx, b.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtime)) c := exec.CommandContext(ctx, b.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtimeName))
c.Stderr = os.Stderr c.Stderr = os.Stderr
log.Printf("Running: %v...", c.Args) log.Printf("Running: %v...", c.Args)
if err := c.Run(); err != nil { if err := c.Run(); err != nil {
@ -115,8 +115,8 @@ func downloadArtifacts(ctx context.Context, binaries []*Binary, driver string, r
} }
// timeMinikubeStart returns the time it takes to execute `minikube start` // timeMinikubeStart returns the time it takes to execute `minikube start`
func timeMinikubeStart(ctx context.Context, binary *Binary, driver string, runtime string) (*result, error) { func timeMinikubeStart(ctx context.Context, binary *Binary, driver string, runtimeName string) (*result, error) {
startCmd := exec.CommandContext(ctx, binary.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtime)) startCmd := exec.CommandContext(ctx, binary.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtimeName))
startCmd.Stderr = os.Stderr startCmd.Stderr = os.Stderr
r, err := timeCommandLogs(startCmd) r, err := timeCommandLogs(startCmd)
@ -147,6 +147,6 @@ func skipIngress(driver string) bool {
// We only want to run the tests if: // We only want to run the tests if:
// 1. It's a VM driver and docker container runtime // 1. It's a VM driver and docker container runtime
// 2. It's docker driver with any container runtime // 2. It's docker driver with any container runtime
func proceed(driver string, runtime string) bool { func proceed(driver string, runtimeName string) bool {
return runtime == "docker" || driver == "docker" return runtimeName == "docker" || driver == "docker"
} }

View File

@ -66,8 +66,8 @@ func init() {
} }
// GetCoreClient returns a core client // GetCoreClient returns a core client
func (k *K8sClientGetter) GetCoreClient(context string) (typed_core.CoreV1Interface, error) { func (k *K8sClientGetter) GetCoreClient(ctx string) (typed_core.CoreV1Interface, error) {
client, err := kapi.Client(context) client, err := kapi.Client(ctx)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "client") return nil, errors.Wrap(err, "client")
} }
@ -288,8 +288,8 @@ func WaitForService(api libmachine.API, cname string, namespace string, service
} }
for _, bareURLString := range serviceURL.URLs { for _, bareURLString := range serviceURL.URLs {
url, _ := OptionallyHTTPSFormattedURLString(bareURLString, https) urlString, _ := OptionallyHTTPSFormattedURLString(bareURLString, https)
urlList = append(urlList, url) urlList = append(urlList, urlString)
} }
return urlList, nil return urlList, nil
} }
@ -314,7 +314,7 @@ func getServiceListFromServicesByLabel(services typed_core.ServiceInterface, key
} }
// CreateSecret creates or modifies secrets // CreateSecret creates or modifies secrets
func CreateSecret(cname string, namespace, name string, dataValues map[string]string, labels map[string]string) error { func CreateSecret(cname string, namespace, name string, dataValues map[string]string, labelData map[string]string) error {
client, err := K8s.GetCoreClient(cname) client, err := K8s.GetCoreClient(cname)
if err != nil { if err != nil {
return &retry.RetriableError{Err: err} return &retry.RetriableError{Err: err}
@ -344,7 +344,7 @@ func CreateSecret(cname string, namespace, name string, dataValues map[string]st
secretObj := &core.Secret{ secretObj := &core.Secret{
ObjectMeta: meta.ObjectMeta{ ObjectMeta: meta.ObjectMeta{
Name: name, Name: name,
Labels: labels, Labels: labelData,
}, },
Data: data, Data: data,
Type: core.SecretTypeOpaque, Type: core.SecretTypeOpaque,

View File

@ -165,11 +165,11 @@ func Detect() (string, error) {
} }
func (c EnvConfig) getShell() shellData { func (c EnvConfig) getShell() shellData {
shell, ok := shellConfigMap[c.Shell] shellData, ok := shellConfigMap[c.Shell]
if !ok { if !ok {
shell = defaultShell shellData = defaultShell
} }
return shell return shellData
} }
func generateUsageHint(ec EnvConfig, usgPlz, usgCmd string) string { func generateUsageHint(ec EnvConfig, usgPlz, usgCmd string) string {

View File

@ -71,8 +71,8 @@ func SetDefaultStorageClass(storage storagev1.StorageV1Interface, name string) e
} }
// GetStoragev1 return storage v1 interface for client // GetStoragev1 return storage v1 interface for client
func GetStoragev1(context string) (storagev1.StorageV1Interface, error) { func GetStoragev1(ctx string) (storagev1.StorageV1Interface, error) {
client, err := kapi.Client(context) client, err := kapi.Client(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -137,10 +137,10 @@ func (api *MockAPI) Remove(name string) error {
} }
// Save saves a host to disk. // Save saves a host to disk.
func (api *MockAPI) Save(host *host.Host) error { func (api *MockAPI) Save(hostInfo *host.Host) error {
api.SaveCalled = true api.SaveCalled = true
api.Logf("MockAPI.Save: %+v", host) api.Logf("MockAPI.Save: %+v", hostInfo)
return api.FakeStore.Save(host) return api.FakeStore.Save(hostInfo)
} }
// GetMachinesDir returns the directory to store machines in. // GetMachinesDir returns the directory to store machines in.

View File

@ -80,10 +80,10 @@ func (m *clusterInspector) getStateAndRoute() (HostState, *Route, error) {
return hostState, route, nil return hostState, route, nil
} }
func getRoute(host *host.Host, clusterConfig config.ClusterConfig) (*Route, error) { func getRoute(hostInfo *host.Host, clusterConfig config.ClusterConfig) (*Route, error) {
hostDriverIP, err := host.Driver.GetIP() hostDriverIP, err := hostInfo.Driver.GetIP()
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error getting host IP for %s", host.Name) return nil, errors.Wrapf(err, "error getting host IP for %s", hostInfo.Name)
} }
_, ipNet, err := net.ParseCIDR(clusterConfig.KubernetesConfig.ServiceCIDR) _, ipNet, err := net.ParseCIDR(clusterConfig.KubernetesConfig.ServiceCIDR)

View File

@ -28,7 +28,8 @@ import (
"k8s.io/klog/v2" "k8s.io/klog/v2"
) )
// ServiceTunnel ... // ServiceTunnel manages an SSH tunnel for a Kubernetes service.
// It holds configuration for the SSH connection and the tunnel's state.
type ServiceTunnel struct { type ServiceTunnel struct {
sshPort string sshPort string
sshKey string sshKey string
@ -37,7 +38,11 @@ type ServiceTunnel struct {
suppressStdOut bool suppressStdOut bool
} }
// NewServiceTunnel ... // NewServiceTunnel creates and returns a new ServiceTunnel instance.
// sshPort is the port number for the SSH connection.
// sshKey is the path to the SSH private key file.
// v1Core is the Kubernetes CoreV1 client interface for interacting with services.
// suppressStdOut controls whether standard output from the tunnel process should be suppressed.
func NewServiceTunnel(sshPort, sshKey string, v1Core typed_core.CoreV1Interface, suppressStdOut bool) *ServiceTunnel { func NewServiceTunnel(sshPort, sshKey string, v1Core typed_core.CoreV1Interface, suppressStdOut bool) *ServiceTunnel {
return &ServiceTunnel{ return &ServiceTunnel{
sshPort: sshPort, sshPort: sshPort,
@ -47,7 +52,12 @@ func NewServiceTunnel(sshPort, sshKey string, v1Core typed_core.CoreV1Interface,
} }
} }
// Start ... // Start establishes an SSH tunnel for the specified Kubernetes service.
// It retrieves service details, creates an SSH connection with random local ports
// for each service port, and starts the tunnel in a new goroutine.
// It returns a slice of URLs (e.g., "http://127.0.0.1:local_port") corresponding
// to the tunnelled ports, or an error if the setup fails.
// Errors from the tunnel running in the background are logged via klog.
func (t *ServiceTunnel) Start(svcName, namespace string) ([]string, error) { func (t *ServiceTunnel) Start(svcName, namespace string) ([]string, error) {
svc, err := t.v1Core.Services(namespace).Get(context.Background(), svcName, metav1.GetOptions{}) svc, err := t.v1Core.Services(namespace).Get(context.Background(), svcName, metav1.GetOptions{})
if err != nil { if err != nil {
@ -75,7 +85,8 @@ func (t *ServiceTunnel) Start(svcName, namespace string) ([]string, error) {
return urls, nil return urls, nil
} }
// Stop ... // Stop attempts to gracefully stop the active SSH tunnel.
// Any errors encountered during the stop process are logged as warnings.
func (t *ServiceTunnel) Stop() { func (t *ServiceTunnel) Stop() {
err := t.sshConn.stop() err := t.sshConn.stop()
if err != nil { if err != nil {

View File

@ -130,20 +130,20 @@ func createSSHConnWithRandomPorts(name, sshPort, sshKey string, svc *v1.Service)
usedPorts := make([]int, 0, len(svc.Spec.Ports)) usedPorts := make([]int, 0, len(svc.Spec.Ports))
for _, port := range svc.Spec.Ports { for _, port := range svc.Spec.Ports {
freeport, err := freeport.GetFreePort() freePort, err := freeport.GetFreePort()
if err != nil { if err != nil {
return nil, err return nil, err
} }
arg := fmt.Sprintf( arg := fmt.Sprintf(
"-L %d:%s:%d", "-L %d:%s:%d",
freeport, freePort,
svc.Spec.ClusterIP, svc.Spec.ClusterIP,
port.Port, port.Port,
) )
sshArgs = append(sshArgs, arg) sshArgs = append(sshArgs, arg)
usedPorts = append(usedPorts, freeport) usedPorts = append(usedPorts, freePort)
} }
cmd := exec.Command("ssh", sshArgs...) cmd := exec.Command("ssh", sshArgs...)

View File

@ -32,7 +32,9 @@ import (
"k8s.io/minikube/pkg/minikube/tunnel" "k8s.io/minikube/pkg/minikube/tunnel"
) )
// SSHTunnel ... // SSHTunnel manages and reconciles SSH tunnels for Kubernetes Services
// (specifically type LoadBalancer) and Ingress resources. It periodically
// checks the cluster state and creates, maintains, or removes tunnels as needed.
type SSHTunnel struct { type SSHTunnel struct {
ctx context.Context ctx context.Context
sshPort string sshPort string
@ -45,7 +47,13 @@ type SSHTunnel struct {
connsToStop map[string]*sshConn connsToStop map[string]*sshConn
} }
// NewSSHTunnel ... // NewSSHTunnel creates and returns a new SSHTunnel instance.
// ctx is the context that controls the lifecycle of the tunnel manager.
// sshPort is the port number of the SSH server to connect to.
// sshKey is the path to the SSH private key file for authentication.
// bindAddress is the local address on which the tunnels will listen.
// v1Core is a Kubernetes CoreV1 client interface for interacting with Services.
// v1Networking is a Kubernetes NetworkingV1 client interface for interacting with Ingresses.
func NewSSHTunnel(ctx context.Context, sshPort, sshKey, bindAddress string, v1Core typed_core.CoreV1Interface, v1Networking typed_networking.NetworkingV1Interface) *SSHTunnel { func NewSSHTunnel(ctx context.Context, sshPort, sshKey, bindAddress string, v1Core typed_core.CoreV1Interface, v1Networking typed_networking.NetworkingV1Interface) *SSHTunnel {
return &SSHTunnel{ return &SSHTunnel{
ctx: ctx, ctx: ctx,
@ -60,7 +68,12 @@ func NewSSHTunnel(ctx context.Context, sshPort, sshKey, bindAddress string, v1Co
} }
} }
// Start ... // Start begins the main reconciliation loop for the SSHTunnel.
// This loop periodically scans for Kubernetes Services (type LoadBalancer)
// and Ingresses, creating or tearing down SSH tunnels as necessary.
// This method blocks until the provided context (t.ctx) is canceled.
// It returns any error associated with context cancellation or initial setup.
// Runtime errors during the tunnel management loop are logged via klog.
func (t *SSHTunnel) Start() error { func (t *SSHTunnel) Start() error {
for { for {
select { select {

View File

@ -91,17 +91,18 @@ func (r *persistentRegistry) Register(tunnel *ID) (rerr error) {
// tunnels simultaneously. It is possible that an old tunnel // tunnels simultaneously. It is possible that an old tunnel
// from an old profile has duplicated route information so we // from an old profile has duplicated route information so we
// need to check both machine name and route information. // need to check both machine name and route information.
if tunnel.MachineName == t.MachineName && t.Route.Equal(tunnel.Route) { if tunnel.MachineName != t.MachineName || !tunnel.Route.Equal(t.Route) {
isRunning, err := checkIfRunning(t.Pid) continue
if err != nil {
return fmt.Errorf("error checking whether conflicting tunnel (%v) is running: %s", t, err)
}
if isRunning {
return errorTunnelAlreadyExists(t)
}
tunnels[i] = tunnel
alreadyExists = true
} }
isRunning, err := checkIfRunning(t.Pid)
if err != nil {
return fmt.Errorf("error checking whether conflicting tunnel (%v) is running: %s", t, err)
}
if isRunning {
return errorTunnelAlreadyExists(t)
}
tunnels[i] = tunnel
alreadyExists = true
} }
if !alreadyExists { if !alreadyExists {
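The nestingReduce fix above inverts the match condition and continues early, so the conflict handling is no longer nested inside an if. A sketch of that guard-clause loop, with simplified types standing in for the tunnel registry:

package main

import "fmt"

type tunnelID struct {
	machine string
	route   string
}

// register shows the inverted-condition form: skip non-matching entries with
// continue and keep the conflict handling at the top level of the loop body.
func register(existing []tunnelID, incoming tunnelID) {
	for i, t := range existing {
		if t.machine != incoming.machine || t.route != incoming.route {
			continue
		}
		existing[i] = incoming
		fmt.Println("replaced conflicting tunnel at index", i)
	}
}

func main() {
	register([]tunnelID{{"minikube", "10.96.0.0/12"}}, tunnelID{"minikube", "10.96.0.0/12"})
}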

View File

@ -68,7 +68,7 @@ func (r *simpleReporter) Report(tunnelState *Status) {
loadbalancer emulator: %s loadbalancer emulator: %s
`, minikubeError, routerError, lbError) `, minikubeError, routerError, lbError)
_, err := r.out.Write([]byte(fmt.Sprintf( _, err := fmt.Fprintf(r.out,
`Status: `Status:
machine: %s machine: %s
pid: %d pid: %d
@ -80,7 +80,7 @@ func (r *simpleReporter) Report(tunnelState *Status) {
tunnelState.TunnelID.Route, tunnelState.TunnelID.Route,
minikubeState, minikubeState,
managedServices, managedServices,
errors))) errors)
if err != nil { if err != nil {
klog.Errorf("failed to report state %s", err) klog.Errorf("failed to report state %s", err)
} }
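The QF1012 change above writes the formatted report straight to the io.Writer instead of building an intermediate string. A minimal sketch of that conversion:

package main

import (
	"fmt"
	"os"
)

func main() {
	machine, pid := "minikube", 1234
	// before (QF1012): os.Stdout.Write([]byte(fmt.Sprintf("machine: %s\npid: %d\n", machine, pid)))
	// after: format directly into the writer, no intermediate []byte
	if _, err := fmt.Fprintf(os.Stdout, "machine: %s\npid: %d\n", machine, pid); err != nil {
		fmt.Fprintln(os.Stderr, "failed to report state:", err)
	}
}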

View File

@ -105,7 +105,7 @@ func (t *routingTable) Equal(other *routingTable) bool {
for i := range *t { for i := range *t {
routesEqual := (*t)[i].route.Equal((*other)[i].route) routesEqual := (*t)[i].route.Equal((*other)[i].route)
linesEqual := (*t)[i].line == ((*other)[i].line) linesEqual := (*t)[i].line == ((*other)[i].line)
if !(routesEqual && linesEqual) { if !routesEqual || !linesEqual {
return false return false
} }
} }
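This is the converse of the earlier De Morgan rewrite: negating a conjunction yields a disjunction of negations. A tiny sketch:

package main

import "fmt"

// notBothEqual shows !(routesEqual && linesEqual) rewritten as !routesEqual || !linesEqual.
func notBothEqual(routesEqual, linesEqual bool) bool {
	return !routesEqual || !linesEqual
}

func main() {
	fmt.Println(notBothEqual(true, false)) // true: the entries differ
	fmt.Println(notBothEqual(true, true))  // false: the entries match
}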

View File

@ -57,14 +57,14 @@ var _ controller.Provisioner = &hostPathProvisioner{}
// Provision creates a storage asset and returns a PV object representing it. // Provision creates a storage asset and returns a PV object representing it.
func (p *hostPathProvisioner) Provision(_ context.Context, options controller.ProvisionOptions) (*core.PersistentVolume, controller.ProvisioningState, error) { func (p *hostPathProvisioner) Provision(_ context.Context, options controller.ProvisionOptions) (*core.PersistentVolume, controller.ProvisioningState, error) {
path := path.Join(p.pvDir, options.PVC.Namespace, options.PVC.Name) hostPath := path.Join(p.pvDir, options.PVC.Namespace, options.PVC.Name)
klog.Infof("Provisioning volume %v to %s", options, path) klog.Infof("Provisioning volume %v to %s", options, hostPath)
if err := os.MkdirAll(path, 0777); err != nil { if err := os.MkdirAll(hostPath, 0777); err != nil {
return nil, controller.ProvisioningFinished, err return nil, controller.ProvisioningFinished, err
} }
// Explicitly chmod created dir, so we know mode is set to 0777 regardless of umask // Explicitly chmod created dir, so we know mode is set to 0777 regardless of umask
if err := os.Chmod(path, 0777); err != nil { if err := os.Chmod(hostPath, 0777); err != nil {
return nil, controller.ProvisioningFinished, err return nil, controller.ProvisioningFinished, err
} }
@ -83,7 +83,7 @@ func (p *hostPathProvisioner) Provision(_ context.Context, options controller.Pr
}, },
PersistentVolumeSource: core.PersistentVolumeSource{ PersistentVolumeSource: core.PersistentVolumeSource{
HostPath: &core.HostPathVolumeSource{ HostPath: &core.HostPathVolumeSource{
Path: path, Path: hostPath,
}, },
}, },
}, },
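The hostPathProvisioner rename above is the importShadow fix: a local variable named path would shadow the imported path package inside the function. A small sketch of the renamed form:

package main

import (
	"fmt"
	"path"
)

// provisionPath shows why the variable was renamed: calling it `path` would
// shadow the imported "path" package for the rest of the function, so a later
// path.Join call in the same scope would no longer compile.
func provisionPath(pvDir, namespace, claim string) string {
	hostPath := path.Join(pvDir, namespace, claim)
	return hostPath
}

func main() {
	fmt.Println(provisionPath("/tmp/hostpath-provisioner", "default", "myclaim"))
}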