build: address all lint issues by v2 (#20804)
* fix QF1011: could omit type *os.File from declaration; it will be inferred from the right-hand side
* fix QF1012: use fmt.Fprintf(x, ...) instead of x.Write(fmt.Sprintf(...))
* fix QF1001: could apply De Morgan's law
* fix QF1003: could use tagged switch
* fix weakCond: suspicious condition; nil check may not be enough, check for len (gocritic)
* fix docStub: silencing go lint doc-comment warnings is unadvised
* fix builtinShadow: shadowing of predeclared identifier: error
* fix importShadow: shadow of imported package
* fix nestingReduce: invert if cond, replace body with `continue`, move old body after the statement
* fix useless-break: useless break in case clause (revive)
* clear the redundant content in the golangci.yaml file
parent f89d46d8a2
commit 3cf1e63e21
@@ -23,23 +23,8 @@ linters:
       - "-ST1020"
       - "-ST1021"
       - "-ST1022"
-      ##### TODO: fix and enable these
-      # 4 occurrences.
-      # Use fmt.Fprintf(x, ...) instead of x.Write(fmt.Sprintf(...)) https://staticcheck.dev/docs/checks#QF1012
-      - "-QF1012"
-      # 3 occurrences.
-      # Apply De Morgan's law https://staticcheck.dev/docs/checks#QF1001
-      - "-QF1001"
-      # 9 occurrences.
-      # Convert if/else-if chain to tagged switch https://staticcheck.dev/docs/checks#QF1003
-      - "-QF1003"
-      # 1 occurrence.
-      # could omit type *os.File from declaration; it will be inferred from the right-hand side
-      - "-QF1011"
       ##### These have been vetted to be disabled.
       # 19 occurrences. Omit embedded fields from selector expression https://staticcheck.dev/docs/checks#QF1008
       # Usefulness is questionable.
       - "-QF1008"
 
     revive:
       enable-all-rules: true
       rules:
@@ -150,23 +135,22 @@ linters:
       # - yodaStyleExpr
       # - typeUnparen
 
       ##### TODO: fix and enable these
+      # We enabled these and we pass
       - nilValReturn
-      # - weakCond # pkg/minikube/config/profile.go:61:9: weakCond: suspicious `cc.Nodes != nil && cc.Nodes[0].Name == node.Name`; nil check may not be enough, check for len (gocritic)
+      - weakCond
       - indexAlloc
       - rangeExprCopy
       - boolExprSimplify
       - commentedOutImport
-      # - docStub # pkg/minikube/tunnel/kic/service_tunnel.go:51:1: docStub: silencing go lint doc-comment warnings is unadvised (gocritic)
+      - docStub
       - emptyFallthrough
       - hexLiteral
       - typeAssertChain
       - unlabelStmt
-      # - builtinShadow # cmd/minikube/cmd/delete.go:89:7: builtinShadow: shadowing of predeclared identifier: error (gocritic)
-      # - importShadow # pkg/storage/storage_provisioner.go:60:2: importShadow: shadow of imported package 'path' (gocritic)
+      - builtinShadow
+      - importShadow
       - initClause
-      # - nestingReduce # pkg/minikube/tunnel/registry.go:94:3: nestingReduce: invert if cond, replace body with `continue`, move old body after the statement (gocritic)
+      - nestingReduce
       - unnecessaryBlock
 
     exclusions:
@@ -181,7 +165,3 @@ linters:
       - path: '(.+)\.go$'
         text: "Error return value of `.*` is not checked"
         linter: errcheck
-      # This code is doubtful and I don't understand it. Location: Line 456
-      - path: 'cmd/minikube/cmd/docker-env.go'
-        text: "useless-break: useless break in case clause"
-        linter: revive
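For context, the weakCond rule enabled above flags conditions like the one cited for profile.go:61, where a bare nil check guards an index expression. A nil check alone does not rule out an empty slice, so indexing can still panic; checking the length covers both nil and empty. A minimal, self-contained sketch (types and names here are illustrative, not taken from minikube):

package main

import "fmt"

type Node struct{ Name string }

func main() {
	nodes := []Node{} // non-nil but empty: a bare nil check would pass
	// weakCond: `nodes != nil && nodes[0].Name == "m01"` is suspicious -
	// nodes[0] panics on an empty slice even though nodes != nil.
	// The safe form checks the length, which also handles nil:
	if len(nodes) > 0 && nodes[0].Name == "m01" {
		fmt.Println("found primary node")
	}
}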
@@ -162,7 +162,7 @@ func GenerateBashCompletion(w io.Writer, cmd *cobra.Command) error {
 }
 
 // GenerateZshCompletion generates the completion for the zsh shell
-func GenerateZshCompletion(out io.Writer, cmd *cobra.Command) error {
+func GenerateZshCompletion(w io.Writer, cmd *cobra.Command) error {
 	zshAutoloadTag := `#compdef minikube
 `
 
@@ -300,17 +300,17 @@ __minikube_convert_bash_to_zsh() {
 <<'BASH_COMPLETION_EOF'
 `
 
-	_, err := out.Write([]byte(zshAutoloadTag))
+	_, err := w.Write([]byte(zshAutoloadTag))
 	if err != nil {
 		return err
 	}
 
-	_, err = out.Write([]byte(boilerPlate))
+	_, err = w.Write([]byte(boilerPlate))
 	if err != nil {
 		return err
 	}
 
-	_, err = out.Write([]byte(zshInitialization))
+	_, err = w.Write([]byte(zshInitialization))
 	if err != nil {
 		return err
 	}
@@ -320,7 +320,7 @@ __minikube_convert_bash_to_zsh() {
 	if err != nil {
 		return errors.Wrap(err, "Error generating zsh completion")
 	}
-	_, err = out.Write(buf.Bytes())
+	_, err = w.Write(buf.Bytes())
 	if err != nil {
 		return err
 	}
@@ -330,7 +330,7 @@ BASH_COMPLETION_EOF
 }
 __minikube_bash_source <(__minikube_convert_bash_to_zsh)
 `
-	_, err = out.Write([]byte(zshTail))
+	_, err = w.Write([]byte(zshTail))
 	if err != nil {
 		return err
 	}
@@ -133,7 +133,7 @@ func loadAddonConfigFile(addon, configFilePath string) (ac *addonConfig) {
 	type configFile struct {
 		Addons addonConfig `json:"addons"`
 	}
-	var config configFile
+	var cf configFile
 
 	if configFilePath != "" {
 		out.Ln("Reading %s configs from %s", addon, configFilePath)
@@ -150,14 +150,14 @@ func loadAddonConfigFile(addon, configFilePath string) (ac *addonConfig) {
 				fmt.Sprintf("error opening config file: %s", configFilePath))
 		}
 
-		if err = json.Unmarshal(confData, &config); err != nil {
+		if err = json.Unmarshal(confData, &cf); err != nil {
 			// err = errors2.Wrapf(err, "error reading config file (%s)", configFilePath)
 			klog.Errorf("error reading config file (%s): %v", configFilePath, err)
 			exit.Message(reason.Kind{ExitCode: reason.ExProgramConfig, Advice: "provide a valid config file"},
 				fmt.Sprintf("error reading config file: %v", err))
 		}
 
-		return &config.Addons
+		return &cf.Addons
 	}
 	return nil
 }
@@ -92,7 +92,9 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) {
 
 	regCredsConf := &ac.RegistryCreds
 	awsEcrAction := regCredsConf.EnableAWSEcr // regCredsConf. "enableAWSEcr")
-	if awsEcrAction == "prompt" || awsEcrAction == "" {
+
+	switch awsEcrAction {
+	case "prompt", "":
 		enableAWSECR := AskForYesNoConfirmation("\nDo you want to enable AWS Elastic Container Registry?", posResponses, negResponses)
 		if enableAWSECR {
 			awsAccessID = AskForStaticValue("-- Enter AWS Access Key ID: ")
@@ -102,7 +104,7 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) {
 			awsAccount = AskForStaticValue("-- Enter 12 digit AWS Account ID (Comma separated list): ")
 			awsRole = AskForStaticValueOptional("-- (Optional) Enter ARN of AWS role to assume: ")
 		}
-	} else if awsEcrAction == "enable" {
+	case "enable":
 		out.Ln("Loading AWS ECR configs from: %s", addonConfigFile)
 		// Then read the configs
 		awsAccessID = regCredsConf.EcrConfigs.AccessID
@@ -111,15 +113,17 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) {
 		awsRegion = regCredsConf.EcrConfigs.Region
 		awsAccount = regCredsConf.EcrConfigs.Account
 		awsRole = regCredsConf.EcrConfigs.Role
-	} else if awsEcrAction == "disable" {
+	case "disable":
 		out.Ln("Ignoring AWS ECR configs")
-	} else {
+	default:
 		out.Ln("Disabling AWS ECR. Invalid value for enableAWSEcr (%s). Must be one of 'disable', 'enable' or 'prompt'", awsEcrAction)
 	}
 
 	gcrPath := ""
 	gcrAction := regCredsConf.EnableGCR
-	if gcrAction == "prompt" || gcrAction == "" {
+
+	switch gcrAction {
+	case "prompt", "":
 		enableGCR := AskForYesNoConfirmation("\nDo you want to enable Google Container Registry?", posResponses, negResponses)
 		if enableGCR {
 			gcrPath = AskForStaticValue("-- Enter path to credentials (e.g. /home/user/.config/gcloud/application_default_credentials.json):")
@@ -129,14 +133,14 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) {
 			gcrURL = AskForStaticValue("-- Enter GCR URL (e.g. https://asia.gcr.io):")
 		}
 	}
-	} else if gcrAction == "enable" {
+	case "enable":
 		out.Ln("Loading GCR configs from: %s", addonConfigFile)
 		// Then read the configs
 		gcrPath = regCredsConf.GcrConfigs.GcrPath
 		gcrURL = regCredsConf.GcrConfigs.GcrURL
-	} else if gcrAction == "disable" {
+	case "disable":
 		out.Ln("Ignoring GCR configs")
-	} else {
+	default:
 		out.Ln("Disabling GCR. Invalid value for enableGCR (%s). Must be one of 'disable', 'enable' or 'prompt'", gcrAction)
 	}
 
@@ -152,40 +156,44 @@ func processRegistryCredsConfig(profile string, ac *addonConfig) {
 	}
 
 	dockerRegistryAction := regCredsConf.EnableDockerRegistry
-	if dockerRegistryAction == "prompt" || dockerRegistryAction == "" {
+
+	switch dockerRegistryAction {
+	case "prompt", "":
 		enableDR := AskForYesNoConfirmation("\nDo you want to enable Docker Registry?", posResponses, negResponses)
 		if enableDR {
 			dockerServer = AskForStaticValue("-- Enter docker registry server url: ")
 			dockerUser = AskForStaticValue("-- Enter docker registry username: ")
 			dockerPass = AskForPasswordValue("-- Enter docker registry password: ")
 		}
-	} else if dockerRegistryAction == "enable" {
+	case "enable":
 		out.Ln("Loading Docker Registry configs from: %s", addonConfigFile)
 		dockerServer = regCredsConf.DockerConfigs.DockerServer
 		dockerUser = regCredsConf.DockerConfigs.DockerUser
 		dockerPass = regCredsConf.DockerConfigs.DockerPass
-	} else if dockerRegistryAction == "disable" {
+	case "disable":
 		out.Ln("Ignoring Docker Registry configs")
-	} else {
+	default:
 		out.Ln("Disabling Docker Registry. Invalid value for enableDockerRegistry (%s). Must be one of 'disable', 'enable' or 'prompt'", dockerRegistryAction)
 	}
 
 	acrAction := regCredsConf.EnableACR
-	if acrAction == "prompt" || acrAction == "" {
+
+	switch acrAction {
+	case "prompt", "":
 		enableACR := AskForYesNoConfirmation("\nDo you want to enable Azure Container Registry?", posResponses, negResponses)
 		if enableACR {
 			acrURL = AskForStaticValue("-- Enter Azure Container Registry (ACR) URL: ")
 			acrClientID = AskForStaticValue("-- Enter client ID (service principal ID) to access ACR: ")
 			acrPassword = AskForPasswordValue("-- Enter service principal password to access Azure Container Registry: ")
 		}
-	} else if acrAction == "enable" {
+	case "enable":
 		out.Ln("Loading ACR configs from: ", addonConfigFile)
 		acrURL = regCredsConf.AcrConfigs.AcrURL
 		acrClientID = regCredsConf.AcrConfigs.AcrClientID
 		acrPassword = regCredsConf.AcrConfigs.AcrPassword
-	} else if acrAction == "disable" {
+	case "disable":
 		out.Ln("Ignoring ACR configs")
-	} else {
+	default:
 		out.Stringf("Disabling ACR. Invalid value for enableACR (%s). Must be one of 'disable', 'enable' or 'prompt'", acrAction)
 	}
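The four rewrites above are instances of staticcheck's QF1003: an if/else-if chain that repeatedly compares the same variable reads better as a tagged switch, which also gives the invalid-value branch an explicit default. A condensed, runnable sketch of the pattern (the helper and its messages are illustrative, not from this change):

package main

import "fmt"

func handleAction(action string) {
	switch action {
	case "prompt", "": // replaces: if action == "prompt" || action == ""
		fmt.Println("ask the user interactively")
	case "enable": // replaces: } else if action == "enable"
		fmt.Println("load stored configs")
	case "disable":
		fmt.Println("ignore configs")
	default: // replaces the trailing else
		fmt.Println("invalid value:", action)
	}
}

func main() { handleAction("enable") }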
@@ -157,7 +157,7 @@ func kubectlProxy(kubectlVersion string, binaryURL string, contextName string, p
 	klog.Infof("Waiting for kubectl to output host:port ...")
 	reader := bufio.NewReader(stdoutPipe)
 
-	var out []byte
+	var outData []byte
 	for {
 		r, timedOut, err := readByteWithTimeout(reader, 5*time.Second)
 		if err != nil {
@@ -170,10 +170,10 @@ func kubectlProxy(kubectlVersion string, binaryURL string, contextName string, p
 			klog.Infof("timed out waiting for input: possibly due to an old kubectl version.")
 			break
 		}
-		out = append(out, r)
+		outData = append(outData, r)
 	}
-	klog.Infof("proxy stdout: %s", string(out))
-	return cmd, hostPortRe.FindString(string(out)), nil
+	klog.Infof("proxy stdout: %s", string(outData))
+	return cmd, hostPortRe.FindString(string(outData)), nil
 }
 
 // readByteWithTimeout returns a byte from a reader or an indicator that a timeout has occurred.
@@ -203,9 +203,9 @@ func readByteWithTimeout(r io.ByteReader, timeout time.Duration) (byte, bool, er
 }
 
 // dashboardURL generates a URL for accessing the dashboard service
-func dashboardURL(proxy string, ns string, svc string) string {
+func dashboardURL(addr string, ns string, svc string) string {
 	// Reference: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above
-	return fmt.Sprintf("http://%s/api/v1/namespaces/%s/services/http:%s:/proxy/", proxy, ns, svc)
+	return fmt.Sprintf("http://%s/api/v1/namespaces/%s/services/http:%s:/proxy/", addr, ns, svc)
 }
 
 // checkURL checks if a URL returns 200 HTTP OK
@@ -86,8 +86,8 @@ type DeletionError struct {
 	Errtype typeOfError
 }
 
-func (error DeletionError) Error() string {
-	return error.Err.Error()
+func (deletionError DeletionError) Error() string {
+	return deletionError.Err.Error()
 }
 
 var hostAndDirsDeleter = func(api libmachine.API, cc *config.ClusterConfig, profileName string) error {
@@ -527,11 +527,11 @@ func uninstallKubernetes(api libmachine.API, cc config.ClusterConfig, n config.N
 }
 
 // HandleDeletionErrors handles deletion errors from DeleteProfiles
-func HandleDeletionErrors(errors []error) {
-	if len(errors) == 1 {
-		handleSingleDeletionError(errors[0])
+func HandleDeletionErrors(errs []error) {
+	if len(errs) == 1 {
+		handleSingleDeletionError(errs[0])
 	} else {
-		handleMultipleDeletionErrors(errors)
+		handleMultipleDeletionErrors(errs)
 	}
 }
 
@@ -556,10 +556,10 @@ func handleSingleDeletionError(err error) {
 	}
 }
 
-func handleMultipleDeletionErrors(errors []error) {
+func handleMultipleDeletionErrors(errs []error) {
 	out.ErrT(style.Sad, "Multiple errors deleting profiles")
 
-	for _, err := range errors {
+	for _, err := range errs {
 		deletionError, ok := err.(DeletionError)
 
 		if ok {
@@ -706,14 +706,14 @@ var isMinikubeProcess = func(pid int) (bool, error) {
 // getPids opens the file at PATH and tries to read
 // one or more space separated pids
 func getPids(path string) ([]int, error) {
-	out, err := os.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return nil, errors.Wrap(err, "ReadFile")
 	}
-	klog.Infof("pidfile contents: %s", out)
+	klog.Infof("pidfile contents: %s", data)
 
 	pids := []int{}
-	strPids := strings.Fields(string(out))
+	strPids := strings.Fields(string(data))
 	for _, p := range strPids {
 		intPid, err := strconv.Atoi(p)
 		if err != nil {
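The receiver rename above is the builtinShadow finding cited for delete.go in the config: naming the receiver `error` shadows the predeclared `error` type inside the method body, where the identifier can then no longer be used as a type. A self-contained sketch of before and after:

package main

import "fmt"

type DeletionError struct{ Err error }

// Before: func (error DeletionError) Error() string { ... }
// Inside that body, `error` names the receiver, not the builtin type,
// so a declaration like `var e error` would fail to compile there.

// After: a descriptive receiver name leaves the builtin untouched.
func (deletionError DeletionError) Error() string {
	return deletionError.Err.Error()
}

func main() {
	var err error = DeletionError{Err: fmt.Errorf("boom")}
	fmt.Println(err)
}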
@@ -462,7 +462,6 @@ func dockerSetScript(ec DockerEnvConfig, w io.Writer) error {
 	switch outputFormat {
 	case "":
 		// shell "none"
-		break
 	case "text":
 		for k, v := range envVars {
 			_, err := fmt.Fprintf(w, "%s=%s\n", k, v)
@@ -472,11 +471,11 @@ func dockerSetScript(ec DockerEnvConfig, w io.Writer) error {
 		}
 		return nil
 	case "json":
-		json, err := json.Marshal(envVars)
+		jsondata, err := json.Marshal(envVars)
 		if err != nil {
 			return err
 		}
-		_, err = w.Write(json)
+		_, err = w.Write(jsondata)
 		if err != nil {
 			return err
 		}
@@ -486,11 +485,11 @@ func dockerSetScript(ec DockerEnvConfig, w io.Writer) error {
 		}
 		return nil
 	case "yaml":
-		yaml, err := yaml.Marshal(envVars)
+		yamldata, err := yaml.Marshal(envVars)
 		if err != nil {
 			return err
 		}
-		_, err = w.Write(yaml)
+		_, err = w.Write(yamldata)
 		if err != nil {
 			return err
 		}
@@ -509,7 +508,6 @@ func dockerUnsetScript(ec DockerEnvConfig, w io.Writer) error {
 	switch outputFormat {
 	case "":
 		// shell "none"
-		break
 	case "text":
 		for _, n := range vars {
 			_, err := fmt.Fprintf(w, "%s\n", n)
@@ -519,11 +517,11 @@ func dockerUnsetScript(ec DockerEnvConfig, w io.Writer) error {
 		}
 		return nil
 	case "json":
-		json, err := json.Marshal(vars)
+		jsondata, err := json.Marshal(vars)
 		if err != nil {
 			return err
 		}
-		_, err = w.Write(json)
+		_, err = w.Write(jsondata)
 		if err != nil {
 			return err
 		}
@@ -533,11 +531,11 @@ func dockerUnsetScript(ec DockerEnvConfig, w io.Writer) error {
 		}
 		return nil
 	case "yaml":
-		yaml, err := yaml.Marshal(vars)
+		yamldata, err := yaml.Marshal(vars)
 		if err != nil {
 			return err
 		}
-		_, err = w.Write(yaml)
+		_, err = w.Write(yamldata)
 		if err != nil {
 			return err
 		}
@@ -157,12 +157,12 @@ func KubectlCommand(version, binaryURL string, args ...string) (*exec.Cmd, error
 		version = constants.DefaultKubernetesVersion
 	}
 
-	path, err := node.CacheKubectlBinary(version, binaryURL)
+	binary, err := node.CacheKubectlBinary(version, binaryURL)
 	if err != nil {
 		return nil, err
 	}
 
-	return exec.Command(path, args...), nil
+	return exec.Command(binary, args...), nil
 }
 
 func init() {
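The `path` to `binary` rename above is the same class of fix as the importShadow finding cited for storage_provisioner.go in the config: a local variable named after an imported package shadows that package and makes it inaccessible below the declaration. A minimal sketch (the value is illustrative):

package main

import (
	"fmt"
	"path"
)

func main() {
	// Before: `path := "/usr/local/bin/kubectl"` shadowed the package,
	// so a call like path.Dir below it would not compile.
	binary := "/usr/local/bin/kubectl"
	fmt.Println(path.Dir(binary)) // the package remains usable
}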
@@ -63,7 +63,7 @@ var logsCmd = &cobra.Command{
 	Short: "Returns logs to debug a local Kubernetes cluster",
 	Long:  `Gets the logs of the running instance, used for debugging minikube, not user code.`,
 	Run: func(_ *cobra.Command, _ []string) {
-		var logOutput *os.File = os.Stdout
+		logOutput := os.Stdout
 		var err error
 
 		if fileOutput != "" {
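The logs.go change above is staticcheck's QF1011: when the right-hand side already has the desired type, spelling it out on the left is redundant. A runnable sketch:

package main

import "os"

func main() {
	// Before (QF1011): var logOutput *os.File = os.Stdout
	// The type is stated twice; os.Stdout is already an *os.File.
	logOutput := os.Stdout
	logOutput.WriteString("minikube logs\n")
}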
@@ -313,7 +313,7 @@ func removePid(path string, pid string) error {
 
 	// we found the correct file
 	// we're reading the pids...
-	out, err := os.ReadFile(pidPath)
+	data, err := os.ReadFile(pidPath)
 	if err != nil {
 		return errors.Wrap(err, "readFile")
 	}
@@ -321,7 +321,7 @@ func removePid(path string, pid string) error {
 	pids := []string{}
 	// we're splitting the mount-pids file content into a slice of strings
 	// so that we can compare each to the PID we're looking for
-	strPids := strings.Fields(string(out))
+	strPids := strings.Fields(string(data))
 	for _, p := range strPids {
 		// If we find the PID, we don't add it to the slice
 		if p == pid {
@@ -253,10 +253,10 @@ func podmanUnsetScript(ec PodmanEnvConfig, w io.Writer) error {
 
 // podmanBridge returns the command to use in a var for accessing the podman varlink bridge over ssh
 func podmanBridge(client *ssh.ExternalClient) string {
-	command := []string{client.BinaryPath}
-	command = append(command, client.BaseArgs...)
-	command = append(command, "--", "sudo", "varlink", "-A", `\'podman varlink \\\$VARLINK_ADDRESS\'`, "bridge")
-	return strings.Join(command, " ")
+	cmd := []string{client.BinaryPath}
+	cmd = append(cmd, client.BaseArgs...)
+	cmd = append(cmd, "--", "sudo", "varlink", "-A", `\'podman varlink \\\$VARLINK_ADDRESS\'`, "bridge")
+	return strings.Join(cmd, " ")
 }
 
 // podmanURL returns the url to use in a var for accessing the podman socket over ssh
@@ -282,7 +282,7 @@ func runStart(cmd *cobra.Command, _ []string) {
 		}
 	}
 
-	kubeconfig, err := startWithDriver(cmd, starter, existing)
+	configInfo, err := startWithDriver(cmd, starter, existing)
 	if err != nil {
 		node.ExitIfFatal(err, useForce)
 		exit.Error(reason.GuestStart, "failed to start node", err)
@@ -294,7 +294,7 @@ func runStart(cmd *cobra.Command, _ []string) {
 		}
 	}
 
-	if err := showKubectlInfo(kubeconfig, starter.Node.KubernetesVersion, starter.Node.ContainerRuntime, starter.Cfg.Name); err != nil {
+	if err := showKubectlInfo(configInfo, starter.Node.KubernetesVersion, starter.Node.ContainerRuntime, starter.Cfg.Name); err != nil {
 		klog.Errorf("kubectl info: %v", err)
 	}
 }
@@ -363,11 +363,11 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *
 	}
 
 	if driver.IsVM(driverName) && !driver.IsSSH(driverName) {
-		url, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL))
+		urlString, err := download.ISO(viper.GetStringSlice(isoURL), cmd.Flags().Changed(isoURL))
 		if err != nil {
 			return node.Starter{}, errors.Wrap(err, "Failed to cache ISO")
 		}
-		cc.MinikubeISO = url
+		cc.MinikubeISO = urlString
 	}
 
 	var existingAddons map[string]bool
@@ -462,9 +462,9 @@ func imageMatchesBinaryVersion(imageVersion, binaryVersion string) bool {
 
 func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) {
 	// start primary control-plane node
-	kubeconfig, err := node.Start(starter)
+	configInfo, err := node.Start(starter)
 	if err != nil {
-		kubeconfig, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err)
+		configInfo, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err)
 		if err != nil {
 			return nil, err
 		}
@@ -512,7 +512,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.
 
 	pause.RemovePausedFile(starter.Runner)
 
-	return kubeconfig, nil
+	return configInfo, nil
 }
 
 func warnAboutMultiNodeCNI() {
@@ -528,14 +528,14 @@ func updateDriver(driverName string) {
 	}
 }
 
-func displayVersion(version string) {
+func displayVersion(ver string) {
 	prefix := ""
 	if ClusterFlagValue() != constants.DefaultClusterName {
 		prefix = fmt.Sprintf("[%s] ", ClusterFlagValue())
 	}
 
 	register.Reg.SetStep(register.InitialSetup)
-	out.Step(style.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": version, "platform": platform()})
+	out.Step(style.Happy, "{{.prefix}}minikube {{.version}} on {{.platform}}", out.V{"prefix": prefix, "version": ver, "platform": platform()})
 }
 
 // displayEnviron makes the user aware of environment variables that will affect how minikube operates
@@ -631,7 +631,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co
 
 	// Re-generate the cluster config, just in case the failure was related to an old config format
 	cc := updateExistingConfigFromFlags(cmd, &existing)
-	var kubeconfig *kubeconfig.Settings
+	var configInfo *kubeconfig.Settings
 	for _, n := range cc.Nodes {
 		r, p, m, h, err := node.Provision(&cc, &n, false)
 		s := node.Starter{
@@ -650,14 +650,14 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co
 
 		k, err := node.Start(s)
 		if n.ControlPlane {
-			kubeconfig = k
+			configInfo = k
 		}
 		if err != nil {
 			// Ok we failed again, let's bail
 			return nil, err
 		}
 	}
-	return kubeconfig, nil
+	return configInfo, nil
 	}
 	// Don't delete the cluster unless they ask
 	return nil, originalErr
@@ -902,12 +902,12 @@ func validateSpecifiedDriver(existing *config.ClusterConfig) {
 
 // validateDriver validates that the selected driver appears sane, exits if not
 func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
-	name := ds.Name
-	os := detect.RuntimeOS()
+	driverName := ds.Name
+	osName := detect.RuntimeOS()
 	arch := detect.RuntimeArch()
-	klog.Infof("validating driver %q against %+v", name, existing)
-	if !driver.Supported(name) {
-		exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": name, "os": os, "arch": arch})
+	klog.Infof("validating driver %q against %+v", driverName, existing)
+	if !driver.Supported(driverName) {
+		exit.Message(reason.DrvUnsupportedOS, "The driver '{{.driver}}' is not supported on {{.os}}/{{.arch}}", out.V{"driver": driverName, "os": osName, "arch": arch})
 	}
 
 	// if we are only downloading artifacts for a driver, we can stop validation here
@@ -916,7 +916,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
 	}
 
 	st := ds.State
-	klog.Infof("status for %s: %+v", name, st)
+	klog.Infof("status for %s: %+v", driverName, st)
 
 	if st.NeedsImprovement {
 		out.Styled(style.Improvement, `For improved {{.driver}} performance, {{.fix}}`, out.V{"driver": driver.FullName(ds.Name), "fix": translate.T(st.Fix)})
@@ -924,7 +924,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
 
 	if ds.Priority == registry.Obsolete {
 		exit.Message(reason.Kind{
-			ID:       fmt.Sprintf("PROVIDER_%s_OBSOLETE", strings.ToUpper(name)),
+			ID:       fmt.Sprintf("PROVIDER_%s_OBSOLETE", strings.ToUpper(driverName)),
 			Advice:   translate.T(st.Fix),
 			ExitCode: reason.ExProviderUnsupported,
 			URL:      st.Doc,
@@ -943,23 +943,23 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
 
 	if !st.Installed {
 		exit.Message(reason.Kind{
-			ID:       fmt.Sprintf("PROVIDER_%s_NOT_FOUND", strings.ToUpper(name)),
+			ID:       fmt.Sprintf("PROVIDER_%s_NOT_FOUND", strings.ToUpper(driverName)),
 			Advice:   translate.T(st.Fix),
 			ExitCode: reason.ExProviderNotFound,
 			URL:      st.Doc,
 			Style:    style.Shrug,
-		}, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": name, "error": st.Error})
+		}, `The '{{.driver}}' provider was not found: {{.error}}`, out.V{"driver": driverName, "error": st.Error})
 	}
 
 	id := st.Reason
 	if id == "" {
-		id = fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(name))
+		id = fmt.Sprintf("PROVIDER_%s_ERROR", strings.ToUpper(driverName))
 	}
 
 	code := reason.ExProviderUnavailable
 
 	if !st.Running {
-		id = fmt.Sprintf("PROVIDER_%s_NOT_RUNNING", strings.ToUpper(name))
+		id = fmt.Sprintf("PROVIDER_%s_NOT_RUNNING", strings.ToUpper(driverName))
 		code = reason.ExProviderNotRunning
 	}
 
@@ -1515,15 +1515,15 @@ func defaultRuntime() string {
 }
 
 // if container runtime is not docker, check that cni is not disabled
-func validateCNI(cmd *cobra.Command, runtime string) {
-	if runtime == constants.Docker {
+func validateCNI(cmd *cobra.Command, runtimeName string) {
+	if runtimeName == constants.Docker {
 		return
 	}
 	if cmd.Flags().Changed(cniFlag) && strings.ToLower(viper.GetString(cniFlag)) == "false" {
 		if viper.GetBool(force) {
-			out.WarnReason(reason.Usage, "You have chosen to disable the CNI but the \"{{.name}}\" container runtime requires CNI", out.V{"name": runtime})
+			out.WarnReason(reason.Usage, "You have chosen to disable the CNI but the \"{{.name}}\" container runtime requires CNI", out.V{"name": runtimeName})
 		} else {
-			exit.Message(reason.Usage, "The \"{{.name}}\" container runtime requires CNI", out.V{"name": runtime})
+			exit.Message(reason.Usage, "The \"{{.name}}\" container runtime requires CNI", out.V{"name": runtimeName})
 		}
 	}
 }
@@ -2004,16 +2004,16 @@ func validateBareMetal(drvName string) {
 	if err != nil {
 		klog.Warningf("failed getting Kubernetes version: %v", err)
 	}
-	version, _ := util.ParseKubernetesVersion(kubeVer)
-	if version.GTE(semver.MustParse("1.18.0-beta.1")) {
+	ver, _ := util.ParseKubernetesVersion(kubeVer)
+	if ver.GTE(semver.MustParse("1.18.0-beta.1")) {
 		if _, err := exec.LookPath("conntrack"); err != nil {
-			exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": version.String()})
+			exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires conntrack to be installed in root's path", out.V{"k8sVersion": ver.String()})
 		}
 	}
 	// crictl is required starting with Kubernetes 1.24, for all runtimes since the removal of dockershim
-	if version.GTE(semver.MustParse("1.24.0-alpha.0")) {
+	if ver.GTE(semver.MustParse("1.24.0-alpha.0")) {
 		if _, err := exec.LookPath("crictl"); err != nil {
-			exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires crictl to be installed in root's path", out.V{"k8sVersion": version.String()})
+			exit.Message(reason.GuestMissingConntrack, "Sorry, Kubernetes {{.k8sVersion}} requires crictl to be installed in root's path", out.V{"k8sVersion": ver.String()})
 		}
 	}
 }
@@ -2062,24 +2062,24 @@ func startNerdctld() {
 	runner := co.CP.Runner
 
 	// and set 777 to these files
-	if out, err := runner.RunCmd(exec.Command("sudo", "chmod", "777", "/usr/local/bin/nerdctl", "/usr/local/bin/nerdctld")); err != nil {
-		exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed setting permission for nerdctl: %s", out.Output()), err)
+	if rest, err := runner.RunCmd(exec.Command("sudo", "chmod", "777", "/usr/local/bin/nerdctl", "/usr/local/bin/nerdctld")); err != nil {
+		exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed setting permission for nerdctl: %s", rest.Output()), err)
	}
 
 	// sudo systemctl start nerdctld.socket
-	if out, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.socket")); err != nil {
-		exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.socket: %s", out.Output()), err)
+	if rest, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.socket")); err != nil {
+		exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.socket: %s", rest.Output()), err)
 	}
 	// sudo systemctl start nerdctld.service
-	if out, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.service")); err != nil {
-		exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.service: %s", out.Output()), err)
+	if rest, err := runner.RunCmd(exec.Command("sudo", "systemctl", "start", "nerdctld.service")); err != nil {
+		exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to enable nerdctld.service: %s", rest.Output()), err)
 	}
 
 	// set up environment variable on remote machine. docker client uses 'non-login & non-interactive shell' therefore the only way is to modify .bashrc file of user 'docker'
 	// insert this at 4th line
 	envSetupCommand := exec.Command("/bin/bash", "-c", "sed -i '4i export DOCKER_HOST=unix:///run/nerdctld.sock' .bashrc")
-	if out, err := runner.RunCmd(envSetupCommand); err != nil {
-		exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to set up DOCKER_HOST: %s", out.Output()), err)
+	if rest, err := runner.RunCmd(envSetupCommand); err != nil {
+		exit.Error(reason.StartNerdctld, fmt.Sprintf("Failed to set up DOCKER_HOST: %s", rest.Output()), err)
 	}
 }
@@ -156,13 +156,13 @@ func checkLogFileMaxSize(file string, maxSizeKB int64) bool {
 // logFileName generates a default logfile name in the form minikube_<argv[1]>_<hash>_<count>.log from args
 func logFileName(dir string, logIdx int64) string {
 	h := sha1.New()
-	user, err := user.Current()
+	userInfo, err := user.Current()
 	if err != nil {
 		klog.Warningf("Unable to get username to add to log filename hash: %v", err)
 	} else {
-		_, err := h.Write([]byte(user.Username))
+		_, err := h.Write([]byte(userInfo.Username))
 		if err != nil {
-			klog.Warningf("Unable to add username %s to log filename hash: %v", user.Username, err)
+			klog.Warningf("Unable to add username %s to log filename hash: %v", userInfo.Username, err)
 		}
 	}
 	for _, s := range pflag.Args() {
@@ -63,11 +63,13 @@ func execute() error {
 	p := plot.New()
 
 	// Set view options
-	if runtime.GOOS == "darwin" {
+	switch runtime.GOOS {
+	case "darwin":
 		p.Title.Text = "CPU% Busy Overhead - With Auto Pause vs. Non Auto Pause (less is better)"
-	} else if runtime.GOOS == "linux" {
+	case "linux":
 		p.Title.Text = "CPU% Busy Overhead - With Auto Pause vs. Non Auto Pause (less is better)"
 	}
 
 	p.Y.Label.Text = "CPU overhead%"
 
 	// Open non-autopause csv file of benchmark summary
@@ -158,9 +160,10 @@ func execute() error {
 	p.Legend.Top = true
 
 	// Add x-lay names
-	if runtime.GOOS == "darwin" {
+	switch runtime.GOOS {
+	case "darwin":
 		p.NominalX("OS idle", "minikube hyperkit", "minikube virtualbox", "minikube docker", "Docker for Mac Kubernetes", "k3d", "kind")
-	} else if runtime.GOOS == "linux" {
+	case "linux":
 		p.NominalX("OS idle", "minikube kvm2", "minikube virtualbox", "minikube docker", "Docker idle", "k3d", "kind")
 	}
 
@@ -223,16 +226,18 @@ func execute() error {
 	p.Add(napl, apl)
 
 	// Output bar graph
-	if runtime.GOOS == "darwin" {
+	switch runtime.GOOS {
+	case "darwin":
 		if err := p.Save(13*vg.Inch, 8*vg.Inch, FOLDER+"/mac.png"); err != nil {
 			return errors.Wrap(err, "Failed to create bar graph png")
 		}
 		log.Printf("Generated graph png to %s/mac.png", FOLDER)
-	} else if runtime.GOOS == "linux" {
+	case "linux":
 		if err := p.Save(13*vg.Inch, 10*vg.Inch, FOLDER+"/linux.png"); err != nil {
 			return errors.Wrap(err, "Failed to create bar graph png")
 		}
 		log.Printf("Generated graph png to %s/linux.png", FOLDER)
 	}
 
 	return nil
 }
@@ -62,11 +62,13 @@ func execute() error {
 	p := plot.New()
 
 	// Set view options
-	if runtime.GOOS == "darwin" {
+	switch runtime.GOOS {
+	case "darwin":
 		p.Title.Text = "CPU% Busy Overhead - Average first 5 minutes on macOS (less is better)"
-	} else if runtime.GOOS == "linux" {
+	case "linux":
 		p.Title.Text = "CPU% Busy Overhead - Average first 5 minutes on Linux (less is better)"
 	}
 
 	p.Y.Label.Text = "CPU overhead%"
 
 	// Open csv file of benchmark summary
@@ -114,9 +116,10 @@ func execute() error {
 	p.Legend.Top = true
 
 	// Add x-lay names
-	if runtime.GOOS == "darwin" {
+	switch runtime.GOOS {
+	case "darwin":
 		p.NominalX("OS idle", "minikube hyperkit", "minikube virtualbox", "minikube docker", "Docker for Mac Kubernetes", "k3d", "kind")
-	} else if runtime.GOOS == "linux" {
+	case "linux":
 		p.NominalX("OS idle", "minikube kvm2", "minikube virtualbox", "minikube docker", "Docker idle", "k3d", "kind")
 	}
 
@@ -151,16 +154,18 @@ func execute() error {
 	p.Add(cl)
 
 	// Output bar graph
-	if runtime.GOOS == "darwin" {
+	switch runtime.GOOS {
+	case "darwin":
 		if err := p.Save(13*vg.Inch, 8*vg.Inch, FOLDER+"/mac.png"); err != nil {
 			return errors.Wrap(err, "Failed to create bar graph png")
 		}
 		log.Printf("Generated graph png to %s/mac.png", FOLDER)
-	} else if runtime.GOOS == "linux" {
+	case "linux":
 		if err := p.Save(13*vg.Inch, 10*vg.Inch, FOLDER+"/linux.png"); err != nil {
 			return errors.Wrap(err, "Failed to create bar graph png")
 		}
 		log.Printf("Generated graph png to %s/linux.png", FOLDER)
 	}
 
 	return nil
 }
@@ -113,7 +113,7 @@ func updateHashFile(version, arch, filePath string) error {
 		return fmt.Errorf("failed to open hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %x buildkit-%s.linux-%s.tar.gz\n", sum, version, arch)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %x buildkit-%s.linux-%s.tar.gz\n", sum, version, arch); err != nil {
 		return fmt.Errorf("failed to write to hash file: %v", err)
 	}
 	return nil
@@ -117,7 +117,7 @@ func updateHashFile(version, arch, packagePath string) error {
 		return fmt.Errorf("failed to open hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %x cni-plugins-linux-%s-%s.tgz\n", sum, arch, version)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %x cni-plugins-linux-%s-%s.tgz\n", sum, arch, version); err != nil {
 		return fmt.Errorf("failed to write to hash file: %v", err)
 	}
 	return nil
@@ -107,7 +107,7 @@ func updateHashFile(version, arch, folderSuffix string, shaSum [sha256.Size]byte
 		return fmt.Errorf("failed to open hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", shaSum, version)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", shaSum, version); err != nil {
 		return fmt.Errorf("failed to write to hash file: %v", err)
 	}
 	return nil
@@ -107,7 +107,7 @@ func updateHashFile(version string) error {
 		return fmt.Errorf("failed to open hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", sum, version)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", sum, version); err != nil {
 		return fmt.Errorf("failed to write to hash file: %v", err)
 	}
 	return nil
@@ -135,7 +135,7 @@ func updateHashFile(filePath, commit string, shaSum [sha256.Size]byte) error {
 		return fmt.Errorf("failed to open hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", shaSum, commit)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", shaSum, commit); err != nil {
 		return fmt.Errorf("failed to write to hash file: %v", err)
 	}
 	return nil
@@ -111,7 +111,7 @@ func updateHashFile(version, arch, packagePath string) error {
 		return fmt.Errorf("failed to open hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %x crictl-%s-linux-%s.tar.gz\n", sum, version, arch)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %x crictl-%s-linux-%s.tar.gz\n", sum, version, arch); err != nil {
 		return fmt.Errorf("failed to write to hash file: %v", err)
 	}
 	return nil
@@ -92,7 +92,7 @@ func updateHashFiles(version string) error {
 		return fmt.Errorf("failed to open hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %x crun-%s.tar.gz\n", sum, version)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %x crun-%s.tar.gz\n", sum, version); err != nil {
 		return fmt.Errorf("failed to write to hash file: %v", err)
 	}
 	return nil
@@ -105,7 +105,7 @@ func updateHashFile(version, arch, folderSuffix string, shaSum [sha256.Size]byte
 		return fmt.Errorf("failed to open hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", shaSum, version)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", shaSum, version); err != nil {
 		return fmt.Errorf("failed to write to hash file: %v", err)
 	}
 	return nil
@@ -95,7 +95,7 @@ func updateHashFile(version, arch, folderSuffix string) error {
 		return fmt.Errorf("failed to open hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %x docker-%s.tgz\n", sum, version)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %x docker-%s.tgz\n", sum, version); err != nil {
 		return fmt.Errorf("failed to write to hash file: %v", err)
 	}
 	return nil
@@ -154,7 +154,7 @@ func updateGoHashFile(version string) error {
 		return fmt.Errorf("failed to open go.hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %s go%s.src.tar.gz\n", sha, version)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %s go%s.src.tar.gz\n", sha, version); err != nil {
 		return fmt.Errorf("failed to write to go.hash file: %v", err)
 	}
 	return nil
@@ -105,7 +105,7 @@ func updateHashFile(version, arch, packagePath string) error {
 		return fmt.Errorf("failed to open hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %x nerdctl-%s-linux-%s.tar.gz\n", sum, version, arch)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %x nerdctl-%s-linux-%s.tar.gz\n", sum, version, arch); err != nil {
 		return fmt.Errorf("failed to write to hash file: %v", err)
 	}
 	return nil
@@ -91,7 +91,7 @@ func updateHashFiles(version string) error {
 		return fmt.Errorf("failed to open hash file: %v", err)
 	}
 	defer f.Close()
-	if _, err := f.WriteString(fmt.Sprintf("sha256 %x %s.tar.gz\n", sum, version)); err != nil {
+	if _, err := fmt.Fprintf(f, "sha256 %x %s.tar.gz\n", sum, version); err != nil {
 		return fmt.Errorf("failed to write to hash file: %v", err)
 	}
 	return nil
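The dozen hash-file hunks above are the same mechanical QF1012 rewrite: f.WriteString(fmt.Sprintf(...)) formats into an intermediate string and then writes its bytes, while fmt.Fprintf(f, ...) formats directly into the writer. Both return a byte count and an error, so the surrounding error handling is unchanged. A standalone sketch (the sum value is illustrative):

package main

import (
	"fmt"
	"os"
)

func main() {
	sum := []byte{0xde, 0xad, 0xbe, 0xef}
	// Before (QF1012): allocates the formatted string first.
	if _, err := os.Stdout.WriteString(fmt.Sprintf("sha256 %x demo.tar.gz\n", sum)); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	// After: writes the formatted output directly to the writer.
	if _, err := fmt.Fprintf(os.Stdout, "sha256 %x demo.tar.gz\n", sum); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}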
@@ -37,9 +37,10 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st
 	}
 
 	class := defaultStorageClassProvisioner
-	if name == "storage-provisioner-gluster" {
+	switch name {
+	case "storage-provisioner-gluster":
 		class = "glusterfile"
-	} else if name == "storage-provisioner-rancher" {
+	case "storage-provisioner-rancher":
 		class = "local-path"
 	}
@@ -456,20 +456,20 @@ func (d *Driver) Stop() error {
 		}
 	}
 
-	runtime, err := cruntime.New(cruntime.Config{Type: d.NodeConfig.ContainerRuntime, Runner: d.exec})
+	crMgr, err := cruntime.New(cruntime.Config{Type: d.NodeConfig.ContainerRuntime, Runner: d.exec})
 	if err != nil { // won't return error because:
 		// even though we can't stop the containers inside, we still wanna stop the minikube container itself
 		klog.Errorf("unable to get container runtime: %v", err)
 	} else {
-		containers, err := runtime.ListContainers(cruntime.ListContainersOptions{Namespaces: constants.DefaultNamespaces})
+		containers, err := crMgr.ListContainers(cruntime.ListContainersOptions{Namespaces: constants.DefaultNamespaces})
 		if err != nil {
 			klog.Infof("unable list containers : %v", err)
 		}
 		if len(containers) > 0 {
-			if err := runtime.StopContainers(containers); err != nil {
+			if err := crMgr.StopContainers(containers); err != nil {
 				klog.Infof("unable to stop containers : %v", err)
 			}
-			if err := runtime.KillContainers(containers); err != nil {
+			if err := crMgr.KillContainers(containers); err != nil {
 				klog.Errorf("unable to kill containers : %v", err)
 			}
 		}
@@ -109,7 +109,7 @@ func CreateNetwork(ociBin, networkName, subnet, staticIP string) (net.IP, error)
 			return info.gateway, nil
 		}
 		// don't retry if error is not address is taken
-		if !(errors.Is(err, ErrNetworkSubnetTaken) || errors.Is(err, ErrNetworkGatewayTaken)) {
+		if !errors.Is(err, ErrNetworkSubnetTaken) && !errors.Is(err, ErrNetworkGatewayTaken) {
 			klog.Errorf("error while trying to create %s network %s %s: %v", ociBin, networkName, subnet.CIDR, err)
 			return nil, fmt.Errorf("un-retryable: %w", err)
 		}
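The condition above is staticcheck's QF1001, De Morgan's law: !(A || B) is equivalent to !A && !B, and pushing the negation inward removes the outer parentheses so each clause reads directly as "this error is not X". A quick runnable check of the equivalence:

package main

import "fmt"

func main() {
	for _, a := range []bool{false, true} {
		for _, b := range []bool{false, true} {
			// De Morgan: !(a || b) == !a && !b for every input.
			fmt.Println(!(a || b) == (!a && !b)) // always true
		}
	}
}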
@@ -191,9 +191,10 @@ func CreateContainerNode(p CreateParams) error { //nolint to suppress cyclomatic
 		runArgs = append(runArgs, "--ip", p.IP)
 	}
 
-	if p.GPUs == "all" || p.GPUs == "nvidia" {
+	switch p.GPUs {
+	case "all", "nvidia":
 		runArgs = append(runArgs, "--gpus", "all", "--env", "NVIDIA_DRIVER_CAPABILITIES=all")
-	} else if p.GPUs == "amd" {
+	case "amd":
 		/* https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html
 		 * "--security-opt seccomp=unconfined" is also required but included above.
 		 */
@@ -225,15 +225,15 @@ func (d *Driver) createNetwork() error {
 	log.Debugf("created network xml: %s", networkXML.String())
 
 	// define the network using our template
-	var network *libvirt.Network
-	network, err = conn.NetworkDefineXML(networkXML.String())
+	var libvirtNet *libvirt.Network
+	libvirtNet, err = conn.NetworkDefineXML(networkXML.String())
 	if err != nil {
 		return fmt.Errorf("defining private KVM network %s %s from xml %s: %w", d.PrivateNetwork, subnet.CIDR, networkXML.String(), err)
 	}
 
 	// and finally create & start it
 	log.Debugf("trying to create private KVM network %s %s...", d.PrivateNetwork, subnet.CIDR)
-	if err = network.Create(); err == nil {
+	if err = libvirtNet.Create(); err == nil {
 		log.Debugf("private KVM network %s %s created", d.PrivateNetwork, subnet.CIDR)
 		return nil
 	}
@@ -263,7 +263,7 @@ func (d *Driver) deleteNetwork() error {
 
 	// network: private
 	log.Debugf("Checking if network %s exists...", d.PrivateNetwork)
-	network, err := conn.LookupNetworkByName(d.PrivateNetwork)
+	libvirtNet, err := conn.LookupNetworkByName(d.PrivateNetwork)
 	if err != nil {
 		if lvErr(err).Code == libvirt.ERR_NO_NETWORK {
 			log.Warnf("Network %s does not exist. Skipping deletion", d.PrivateNetwork)
@@ -271,7 +271,7 @@ func (d *Driver) deleteNetwork() error {
 		}
 		return errors.Wrapf(err, "failed looking up network %s", d.PrivateNetwork)
 	}
-	defer func() { _ = network.Free() }()
+	defer func() { _ = libvirtNet.Free() }()
 	log.Debugf("Network %s exists", d.PrivateNetwork)
 
 	err = d.checkDomains(conn)
@@ -283,18 +283,18 @@ func (d *Driver) deleteNetwork() error {
 
 	log.Debugf("Trying to delete network %s...", d.PrivateNetwork)
 	deleteFunc := func() error {
-		active, err := network.IsActive()
+		active, err := libvirtNet.IsActive()
 		if err != nil {
 			return err
 		}
 		if active {
 			log.Debugf("Destroying active network %s", d.PrivateNetwork)
-			if err := network.Destroy(); err != nil {
+			if err := libvirtNet.Destroy(); err != nil {
 				return err
 			}
 		}
 		log.Debugf("Undefining inactive network %s", d.PrivateNetwork)
-		return network.Undefine()
+		return libvirtNet.Undefine()
 	}
 	if err := retry.Local(deleteFunc, 10*time.Second); err != nil {
 		return errors.Wrap(err, "deleting network")
@@ -391,23 +391,23 @@ func (d *Driver) checkDomains(conn *libvirt.Connect) error {
 
 // addStaticIP appends new host's name, MAC and static IP address record to list of network DHCP leases.
 // It will return nil if host record already exists.
-func addStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error {
-	l, err := dhcpLease(conn, network, hostname, mac, ip)
+func addStaticIP(conn *libvirt.Connect, networkName, hostname, mac, ip string) error {
+	l, err := dhcpLease(conn, networkName, hostname, mac, ip)
 	if err != nil {
-		return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", network, hostname, mac, ip, err)
+		return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", networkName, hostname, mac, ip, err)
 	}
 	if l != nil {
-		log.Debugf("skip adding static IP to network %s - found existing host DHCP lease matching {name: %q, mac: %q, ip: %q}", network, hostname, mac, ip)
+		log.Debugf("skip adding static IP to network %s - found existing host DHCP lease matching {name: %q, mac: %q, ip: %q}", networkName, hostname, mac, ip)
 		return nil
 	}
 
-	net, err := conn.LookupNetworkByName(network)
+	libvirtNet, err := conn.LookupNetworkByName(networkName)
 	if err != nil {
-		return fmt.Errorf("failed looking up network %s: %w", network, err)
+		return fmt.Errorf("failed looking up network %s: %w", networkName, err)
 	}
-	defer func() { _ = net.Free() }()
+	defer func() { _ = libvirtNet.Free() }()
 
-	return net.Update(
+	return libvirtNet.Update(
 		libvirt.NETWORK_UPDATE_COMMAND_ADD_LAST,
 		libvirt.NETWORK_SECTION_IP_DHCP_HOST,
 		-1,
@@ -417,23 +417,23 @@ func addStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error
 
 // delStaticIP deletes static IP address record that matches given combination of host's name, MAC and IP from list of network DHCP leases.
 // It will return nil if record doesn't exist.
-func delStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error {
-	l, err := dhcpLease(conn, network, hostname, mac, ip)
+func delStaticIP(conn *libvirt.Connect, networkName, hostname, mac, ip string) error {
+	l, err := dhcpLease(conn, networkName, hostname, mac, ip)
 	if err != nil {
-		return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", network, hostname, mac, ip, err)
+		return fmt.Errorf("failed looking up network %s for host DHCP lease {name: %q, mac: %q, ip: %q}: %w", networkName, hostname, mac, ip, err)
 	}
 	if l == nil {
-		log.Debugf("skip deleting static IP from network %s - couldn't find host DHCP lease matching {name: %q, mac: %q, ip: %q}", network, hostname, mac, ip)
+		log.Debugf("skip deleting static IP from network %s - couldn't find host DHCP lease matching {name: %q, mac: %q, ip: %q}", networkName, hostname, mac, ip)
 		return nil
 	}
 
-	net, err := conn.LookupNetworkByName(network)
+	libvirtNet, err := conn.LookupNetworkByName(networkName)
 	if err != nil {
-		return fmt.Errorf("failed looking up network %s: %w", network, err)
+		return fmt.Errorf("failed looking up network %s: %w", networkName, err)
 	}
-	defer func() { _ = net.Free() }()
+	defer func() { _ = libvirtNet.Free() }()
 
-	return net.Update(
+	return libvirtNet.Update(
 		libvirt.NETWORK_UPDATE_COMMAND_DELETE,
 		libvirt.NETWORK_SECTION_IP_DHCP_HOST,
 		-1,
@@ -442,56 +442,56 @@ func delStaticIP(conn *libvirt.Connect, network, hostname, mac, ip string) error
 }
 
 // dhcpLease returns network DHCP lease that matches given combination of host's name, MAC and IP.
-func dhcpLease(conn *libvirt.Connect, network, hostname, mac, ip string) (lease *libvirt.NetworkDHCPLease, err error) {
+func dhcpLease(conn *libvirt.Connect, networkName, hostname, mac, ip string) (lease *libvirt.NetworkDHCPLease, err error) {
 	if hostname == "" && mac == "" && ip == "" {
 		return nil, nil
 	}
 
-	net, err := conn.LookupNetworkByName(network)
+	libvirtNet, err := conn.LookupNetworkByName(networkName)
 	if err != nil {
-		return nil, fmt.Errorf("failed looking up network %s: %w", network, err)
+		return nil, fmt.Errorf("failed looking up network %s: %w", networkName, err)
 	}
-	defer func() { _ = net.Free() }()
+	defer func() { _ = libvirtNet.Free() }()
 
-	leases, err := net.GetDHCPLeases()
+	leases, err := libvirtNet.GetDHCPLeases()
 	if err != nil {
 		return nil, fmt.Errorf("failed getting host DHCP leases: %w", err)
 	}
 
	for _, l := range leases {
 		if (hostname == "" || hostname == l.Hostname) && (mac == "" || mac == l.Mac) && (ip == "" || ip == l.IPaddr) {
-			log.Debugf("found host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s: %+v", hostname, mac, ip, network, l)
+			log.Debugf("found host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s: %+v", hostname, mac, ip, networkName, l)
 			return &l, nil
 		}
 	}
 
-	log.Debugf("unable to find host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s", hostname, mac, ip, network)
+	log.Debugf("unable to find host DHCP lease matching {name: %q, mac: %q, ip: %q} in network %s", hostname, mac, ip, networkName)
 	return nil, nil
 }
 
 // ipFromAPI returns current primary IP address of domain interface in network.
-func ipFromAPI(conn *libvirt.Connect, domain, network string) (string, error) {
-	mac, err := macFromXML(conn, domain, network)
+func ipFromAPI(conn *libvirt.Connect, domain, networkName string) (string, error) {
+	mac, err := macFromXML(conn, domain, networkName)
 	if err != nil {
 		return "", fmt.Errorf("failed getting MAC address: %w", err)
 	}
 
 	ifaces, err := ifListFromAPI(conn, domain)
 	if err != nil {
-		return "", fmt.Errorf("failed getting network %s interfaces using API of domain %s: %w", network, domain, err)
+		return "", fmt.Errorf("failed getting network %s interfaces using API of domain %s: %w", networkName, domain, err)
 	}
 	for _, i := range ifaces {
 		if i.Hwaddr == mac {
 			if i.Addrs != nil {
-				log.Debugf("domain %s has current primary IP address %s and MAC address %s in network %s", domain, i.Addrs[0].Addr, mac, network)
+				log.Debugf("domain %s has current primary IP address %s and MAC address %s in network %s", domain, i.Addrs[0].Addr, mac, networkName)
 				return i.Addrs[0].Addr, nil
 			}
-			log.Debugf("domain %s with MAC address %s doesn't have current IP address in network %s: %+v", domain, mac, network, i)
+			log.Debugf("domain %s with MAC address %s doesn't have current IP address in network %s: %+v", domain, mac, networkName, i)
 			return "", nil
 		}
 	}
 
-	log.Debugf("unable to find current IP address of domain %s in network %s", domain, network)
+	log.Debugf("unable to find current IP address of domain %s in network %s", domain, networkName)
 	return "", nil
 }
 
@@ -522,40 +522,40 @@ func ifListFromAPI(conn *libvirt.Connect, domain string) ([]libvirt.DomainInterf
 }
 
 // ipFromXML returns defined IP address of interface in network.
-func ipFromXML(conn *libvirt.Connect, domain, network string) (string, error) {
-	mac, err := macFromXML(conn, domain, network)
+func ipFromXML(conn *libvirt.Connect, domain, networkName string) (string, error) {
+	mac, err := macFromXML(conn, domain, networkName)
 	if err != nil {
 		return "", fmt.Errorf("failed getting MAC address: %w", err)
 	}
 
-	lease, err := dhcpLease(conn, network, "", mac, "")
+	lease, err := dhcpLease(conn, networkName, "", mac, "")
 	if err != nil {
-		return "", fmt.Errorf("failed looking up network %s for host DHCP lease {name: <any>, mac: %q, ip: <any>}: %w", network, mac, err)
+		return "", fmt.Errorf("failed looking up network %s for host DHCP lease {name: <any>, mac: %q, ip: <any>}: %w", networkName, mac, err)
 	}
 	if lease == nil {
-		log.Debugf("unable to find defined IP address of network %s interface with MAC address %s", network, mac)
+		log.Debugf("unable to find defined IP address of network %s interface with MAC address %s", networkName, mac)
 		return "", nil
 	}
 
-	log.Debugf("domain %s has defined IP address %s and MAC address %s in network %s", domain, lease.IPaddr, mac, network)
+	log.Debugf("domain %s has defined IP address %s and MAC address %s in network %s", domain, lease.IPaddr, mac, networkName)
 	return lease.IPaddr, nil
 }
 
 // macFromXML returns defined MAC address of interface in network from domain XML.
-func macFromXML(conn *libvirt.Connect, domain, network string) (string, error) {
+func macFromXML(conn *libvirt.Connect, domain, networkName string) (string, error) {
 	domIfs, err := ifListFromXML(conn, domain)
 	if err != nil {
-		return "", fmt.Errorf("failed getting network %s interfaces using XML of domain %s: %w", network, domain, err)
+		return "", fmt.Errorf("failed getting network %s interfaces using XML of domain %s: %w", networkName, domain, err)
 	}
 
 	for _, i := range domIfs {
-		if i.Source.Network == network {
-			log.Debugf("domain %s has defined MAC address %s in network %s", domain, i.Mac.Address, network)
+		if i.Source.Network == networkName {
+			log.Debugf("domain %s has defined MAC address %s in network %s", domain, i.Mac.Address, networkName)
 			return i.Mac.Address, nil
 		}
 	}
 
-	return "", fmt.Errorf("unable to get defined MAC address of network %s interface using XML of domain %s: network %s not found", network, domain, network)
+	return "", fmt.Errorf("unable to get defined MAC address of network %s interface using XML of domain %s: network %s not found", networkName, domain, networkName)
 }
 
 // ifListFromXML returns defined domain interfaces from domain XML.
@@ -97,17 +97,17 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer) error {
buf.WriteString(long + "\n\n")

if cmd.Runnable() {
-buf.WriteString(fmt.Sprintf("```shell\n%s\n```\n\n", cmd.UseLine()))
+fmt.Fprintf(buf, "```shell\n%s\n```\n\n", cmd.UseLine())
}

if len(cmd.Aliases) > 0 {
buf.WriteString("### Aliases\n\n")
-buf.WriteString(fmt.Sprintf("%s\n\n", cmd.Aliases))
+fmt.Fprintf(buf, "%s\n\n", cmd.Aliases)
}

if len(cmd.Example) > 0 {
buf.WriteString("### Examples\n\n")
-buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example))
+fmt.Fprintf(buf, "```\n%s\n```\n\n", cmd.Example)
}

if err := printOptions(buf, cmd); err != nil {
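For reference, a minimal runnable sketch of the QF1012 rewrite applied in the hunks above; the buffer and strings are toy values, not minikube code. fmt.Fprintf formats straight into any io.Writer, so the intermediate string that x.Write(fmt.Sprintf(...)) allocates disappears:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	// Flagged by staticcheck QF1012: formats into a temporary string,
	// then copies that string into the writer.
	buf.WriteString(fmt.Sprintf("hello %s\n", "world"))

	// Preferred: write the formatted output directly to the io.Writer,
	// skipping the intermediate allocation.
	fmt.Fprintf(&buf, "hello %s\n", "world")

	fmt.Print(buf.String())
}
```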
@@ -83,7 +83,7 @@ func ErrorCodes(docPath string, pathsToCheck []string) error {

// This is the numeric code of the error, e.g. 80 for ExGuest Error
code := s.Value
-buf.WriteString(fmt.Sprintf("%s: %s \n", code, currentError))
+fmt.Fprintf(buf, "%s: %s \n", code, currentError)
}
return true
})

@@ -100,7 +100,7 @@ func ErrorCodes(docPath string, pathsToCheck []string) error {
currentNode = id.Name
if strings.HasPrefix(currentNode, "Ex") && currentNode != "ExitCode" {
// We have all the info we're going to get on this error, print it out
-buf.WriteString(fmt.Sprintf("%s (Exit code %v) \n", currentID, currentNode))
+fmt.Fprintf(buf, "%s (Exit code %v) \n", currentID, currentNode)
if currentComment != "" {
buf.WriteString(currentComment + " \n")
}
@@ -48,21 +48,21 @@ var (
)

// ClientConfig returns the client configuration for a kubectl context
-func ClientConfig(context string) (*rest.Config, error) {
+func ClientConfig(ctx string) (*rest.Config, error) {
loader := clientcmd.NewDefaultClientConfigLoadingRules()
-cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, &clientcmd.ConfigOverrides{CurrentContext: context})
+cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, &clientcmd.ConfigOverrides{CurrentContext: ctx})
c, err := cc.ClientConfig()
if err != nil {
return nil, fmt.Errorf("client config: %v", err)
}
c = proxy.UpdateTransport(c)
-klog.V(1).Infof("client config for %s: %+v", context, c)
+klog.V(1).Infof("client config for %s: %+v", ctx, c)
return c, nil
}

// Client gets the Kubernetes client for a kubectl context name
-func Client(context string) (*kubernetes.Clientset, error) {
-c, err := ClientConfig(context)
+func Client(ctx string) (*kubernetes.Clientset, error) {
+c, err := ClientConfig(ctx)
if err != nil {
return nil, err
}
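The context -> ctx rename above is the usual cure for gocritic's importShadow: a parameter named context hides the standard context package for the whole function body. A toy illustration of the hazard, with assumed imports rather than the minikube file itself:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// With a parameter named context, the call below would not compile,
// because context would name a string instead of the package:
//   func lookup(context string) { context.Background() } // compile error
func lookup(ctx string) {
	c, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println("kubectl context:", ctx, "deadline set:", c != nil)
}

func main() {
	lookup("minikube")
}
```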
@@ -579,8 +579,8 @@ func installCertSymlinks(cr command.Runner, caCerts map[string]string) error {

// canRead returns true if the file represented
// by path exists and is readable, otherwise false.
-func canRead(path string) bool {
-f, err := os.Open(path)
+func canRead(filePath string) bool {
+f, err := os.Open(filePath)
if err != nil {
return false
}
@@ -164,9 +164,11 @@ func auxiliary(mirror string) []string {
func storageProvisioner(mirror string) string {
cv := version.GetStorageProvisionerVersion()
in := "k8s-minikube/storage-provisioner:" + cv
-if mirror == "" {
+
+switch mirror {
+case "":
mirror = "gcr.io"
-} else if mirror == constants.AliyunMirror {
+case constants.AliyunMirror:
in = "storage-provisioner:" + cv
}
return path.Join(mirror, in)
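This hunk is the QF1003 pattern (convert an if/else-if chain over one variable into a tagged switch). A self-contained sketch of the same rewrite, with made-up mirror names rather than the minikube constants:

```go
package main

import "fmt"

// mirrorHost picks a registry host; shown both ways to illustrate
// staticcheck QF1003. Toy function, not the minikube code.
func mirrorHost(mirror string) string {
	// Before: an if/else-if chain repeatedly comparing the same variable:
	//   if mirror == "" { ... } else if mirror == "aliyun" { ... }

	// After: a tagged switch makes the single subject explicit.
	switch mirror {
	case "":
		return "gcr.io"
	case "aliyun":
		return "registry.cn-hangzhou.aliyuncs.com/google_containers"
	default:
		return mirror
	}
}

func main() {
	fmt.Println(mirrorHost(""))
	fmt.Println(mirrorHost("aliyun"))
}
```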
@@ -172,7 +172,7 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) {

// init initialises primary control-plane using kubeadm.
func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
-version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
+ver, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "parsing Kubernetes version")
}

@@ -195,7 +195,7 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
"Swap", // For "none" users who have swap configured
"NumCPU", // For "none" users who have too few CPUs
}
-if version.GE(semver.MustParse("1.20.0")) {
+if ver.GE(semver.MustParse("1.20.0")) {
ignore = append(ignore,
"Mem", // For "none" users who have too little memory
)

@@ -719,7 +719,7 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro
// and by that time we would exit completely, so we wait until kubelet begins restarting pods
klog.Info("waiting for restarted kubelet to initialise ...")
start := time.Now()
-wait := func() error {
+waitFunc := func() error {
pods, err := client.CoreV1().Pods(meta.NamespaceSystem).List(context.Background(), meta.ListOptions{LabelSelector: "tier=control-plane"})
if err != nil {
return err

@@ -731,7 +731,7 @@ func (k *Bootstrapper) restartPrimaryControlPlane(cfg config.ClusterConfig) erro
}
return fmt.Errorf("kubelet not initialised")
}
-_ = retry.Expo(wait, 250*time.Millisecond, 1*time.Minute)
+_ = retry.Expo(waitFunc, 250*time.Millisecond, 1*time.Minute)
klog.Infof("kubelet initialised")
klog.Infof("duration metric: took %s waiting for restarted kubelet to initialise ...", time.Since(start))
}

@@ -784,11 +784,11 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) {
joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd))

// avoid "Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock" error
-version, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion)
+ver, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion)
if err != nil {
return "", errors.Wrap(err, "parsing Kubernetes version")
}
-cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: k.c, Socket: cc.KubernetesConfig.CRISocket, KubernetesVersion: version})
+cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: k.c, Socket: cc.KubernetesConfig.CRISocket, KubernetesVersion: ver})
if err != nil {
klog.Errorf("cruntime: %v", err)
}

@@ -840,11 +840,11 @@ func StopKubernetes(runner command.Runner, cr cruntime.Manager) {

// DeleteCluster removes the components that were started earlier
func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
-version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion)
+ver, err := util.ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "parsing Kubernetes version")
}
-cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket, KubernetesVersion: version})
+cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket, KubernetesVersion: ver})
if err != nil {
return errors.Wrap(err, "runtime")
}

@@ -852,7 +852,7 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
ka := bsutil.InvokeKubeadm(k8s.KubernetesVersion)
sp := cr.SocketPath()
cmd := fmt.Sprintf("%s reset --cri-socket %s --force", ka, sp)
-if version.LT(semver.MustParse("1.11.0")) {
+if ver.LT(semver.MustParse("1.11.0")) {
cmd = fmt.Sprintf("%s reset --cri-socket %s", ka, sp)
}

@@ -874,12 +874,12 @@ func (k *Bootstrapper) SetupCerts(k8s config.ClusterConfig, n config.Node, pcpCm
func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
klog.Infof("updating cluster %+v ...", cfg)

-images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion)
+imgs, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "kubeadm images")
}

-version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
+ver, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "parsing Kubernetes version")
}

@@ -887,7 +887,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
Type: cfg.KubernetesConfig.ContainerRuntime,
Runner: k.c,
Socket: cfg.KubernetesConfig.CRISocket,
-KubernetesVersion: version,
+KubernetesVersion: ver,
})
if err != nil {
return errors.Wrap(err, "runtime")

@@ -903,7 +903,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
}

if cfg.KubernetesConfig.ShouldLoadCachedImages {
-if err := machine.LoadCachedImages(&cfg, k.c, images, detect.ImageCacheDir(), false); err != nil {
+if err := machine.LoadCachedImages(&cfg, k.c, imgs, detect.ImageCacheDir(), false); err != nil {
out.FailureT("Unable to load cached images: {{.error}}", out.V{"error": err})
}
}
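The version -> ver and images -> imgs renames throughout this file address gocritic's importShadow: the locals were hiding the imported version and images packages, so later package-qualified calls in the same scope would stop compiling. A toy demonstration of the mechanism using the standard strings package:

```go
package main

import (
	"fmt"
	"strings"
)

// A local variable that reuses an imported package's name shadows the
// package for the rest of the scope:
//   strings := []string{"a"}     // shadows the strings package
//   strings.Split("a,b", ",")    // compile error: strings is a []string
func list() []string {
	items := []string{"a,b", "c"}
	return strings.Split(items[0], ",") // fine: strings still names the package
}

func main() {
	fmt.Println(list())
}
```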
@@ -34,21 +34,21 @@ import (
)

// HostIP gets the ip address to be used for mapping host -> VM and VM -> host
-func HostIP(host *host.Host, clusterName string) (net.IP, error) {
-switch host.DriverName {
+func HostIP(hostInfo *host.Host, clusterName string) (net.IP, error) {
+switch hostInfo.DriverName {
case driver.Docker:
-return oci.RoutableHostIPFromInside(oci.Docker, clusterName, host.Name)
+return oci.RoutableHostIPFromInside(oci.Docker, clusterName, hostInfo.Name)
case driver.Podman:
-return oci.RoutableHostIPFromInside(oci.Podman, clusterName, host.Name)
+return oci.RoutableHostIPFromInside(oci.Podman, clusterName, hostInfo.Name)
case driver.SSH:
-ip, err := host.Driver.GetIP()
+ip, err := hostInfo.Driver.GetIP()
if err != nil {
return []byte{}, errors.Wrap(err, "Error getting VM/Host IP address")
}
return net.ParseIP(ip), nil
case driver.KVM2:
// `host.Driver.GetIP` returns dhcp lease info for a given network(=`virsh net-dhcp-leases minikube-net`)
-vmIPString, err := host.Driver.GetIP()
+vmIPString, err := hostInfo.Driver.GetIP()
if err != nil {
return []byte{}, errors.Wrap(err, "Error getting VM/Host IP address")
}

@@ -59,7 +59,7 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) {
}
return net.IPv4(vmIP[0], vmIP[1], vmIP[2], byte(1)), nil
case driver.QEMU, driver.QEMU2:
-ipString, err := host.Driver.GetIP()
+ipString, err := hostInfo.Driver.GetIP()
if err != nil {
return []byte{}, errors.Wrap(err, "Error getting IP address")
}

@@ -70,7 +70,7 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) {
// socket_vmnet network case
return net.ParseIP("192.168.105.1"), nil
case driver.HyperV:
-v := reflect.ValueOf(host.Driver).Elem()
+v := reflect.ValueOf(hostInfo.Driver).Elem()
var hypervVirtualSwitch string
// We don't have direct access to hyperv.Driver so use reflection to retrieve the virtual switch name
for i := 0; i < v.NumField(); i++ {

@@ -91,7 +91,7 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) {
return ip, nil
case driver.VirtualBox:
vBoxManageCmd := driver.VBoxManagePath()
-out, err := exec.Command(vBoxManageCmd, "showvminfo", host.Name, "--machinereadable").Output()
+out, err := exec.Command(vBoxManageCmd, "showvminfo", hostInfo.Name, "--machinereadable").Output()
if err != nil {
return []byte{}, errors.Wrap(err, "vboxmanage")
}

@@ -126,11 +126,11 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) {

return net.ParseIP(ip), nil
case driver.HyperKit:
-vmIPString, _ := host.Driver.GetIP()
+vmIPString, _ := hostInfo.Driver.GetIP()
gatewayIPString := vmIPString[:strings.LastIndex(vmIPString, ".")+1] + "1"
return net.ParseIP(gatewayIPString), nil
case driver.VMware:
-vmIPString, err := host.Driver.GetIP()
+vmIPString, err := hostInfo.Driver.GetIP()
if err != nil {
return []byte{}, errors.Wrap(err, "Error getting VM IP address")
}

@@ -140,28 +140,28 @@ func HostIP(host *host.Host, clusterName string) (net.IP, error) {
}
return net.IPv4(vmIP[0], vmIP[1], vmIP[2], byte(1)), nil
case driver.VFKit:
-vmIPString, _ := host.Driver.GetIP()
+vmIPString, _ := hostInfo.Driver.GetIP()
gatewayIPString := vmIPString[:strings.LastIndex(vmIPString, ".")+1] + "1"
return net.ParseIP(gatewayIPString), nil
case driver.None:
return net.ParseIP("127.0.0.1"), nil
default:
-return []byte{}, fmt.Errorf("HostIP not yet implemented for %q driver", host.DriverName)
+return []byte{}, fmt.Errorf("HostIP not yet implemented for %q driver", hostInfo.DriverName)
}
}

// DriverIP gets the ip address of the current minikube cluster
func DriverIP(api libmachine.API, machineName string) (net.IP, error) {
-host, err := machine.LoadHost(api, machineName)
+hostInfo, err := machine.LoadHost(api, machineName)
if err != nil {
return nil, err
}

-ipStr, err := host.Driver.GetIP()
+ipStr, err := hostInfo.Driver.GetIP()
if err != nil {
return nil, errors.Wrap(err, "getting IP")
}
-if driver.IsKIC(host.DriverName) {
+if driver.IsKIC(hostInfo.DriverName) {
ipStr = oci.DefaultBindIPV4
}
ip := net.ParseIP(ipStr)
@@ -245,18 +245,18 @@ func ConfigureDefaultBridgeCNIs(r Runner, networkPlugin string) error {

// disableAllBridgeCNIs disables all bridge cnis by changing extension to "mk_disabled" of all *bridge* config file(s) found in default location (ie, /etc/cni/net.d).
func disableAllBridgeCNIs(r Runner) error {
-path := "/etc/cni/net.d"
+cniPath := "/etc/cni/net.d"

out, err := r.RunCmd(exec.Command(
// for cri-o, we also disable 87-podman.conflist (that does not have 'bridge' in its name)
-"sudo", "find", path, "-maxdepth", "1", "-type", "f", "(", "(", "-name", "*bridge*", "-or", "-name", "*podman*", ")", "-and", "-not", "-name", "*.mk_disabled", ")", "-printf", "%p, ", "-exec", "sh", "-c",
+"sudo", "find", cniPath, "-maxdepth", "1", "-type", "f", "(", "(", "-name", "*bridge*", "-or", "-name", "*podman*", ")", "-and", "-not", "-name", "*.mk_disabled", ")", "-printf", "%p, ", "-exec", "sh", "-c",
`sudo mv {} {}.mk_disabled`, ";"))
if err != nil {
-return fmt.Errorf("failed to disable all bridge cni configs in %q: %v", path, err)
+return fmt.Errorf("failed to disable all bridge cni configs in %q: %v", cniPath, err)
}
configs := strings.Trim(out.Stdout.String(), ", ")
if len(configs) == 0 {
-klog.Infof("no active bridge cni configs found in %q - nothing to disable", path)
+klog.Infof("no active bridge cni configs found in %q - nothing to disable", cniPath)
return nil
}
klog.Infof("disabled [%s] bridge cni config(s)", configs)
@@ -44,10 +44,10 @@ type kicRunner struct {
}

// NewKICRunner returns a kicRunner implementor of runner which runs cmds inside a container
-func NewKICRunner(containerNameOrID string, oci string) Runner {
+func NewKICRunner(containerNameOrID string, ociName string) Runner {
return &kicRunner{
nameOrID: containerNameOrID,
-ociBin: oci, // docker or podman
+ociBin: ociName, // docker or podman
}
}

@@ -271,8 +271,8 @@ func copyToPodman(src string, dest string) error {
defer file.Close()
parts := strings.Split(dest, ":")
container := parts[0]
-path := parts[1]
-cmd := exec.Command(oci.Podman, "exec", "-i", container, "tee", path)
+containerPath := parts[1]
+cmd := exec.Command(oci.Podman, "exec", "-i", container, "tee", containerPath)
cmd.Stdin = file
klog.Infof("Run: %v", cmd)
if err := cmd.Run(); err != nil {
@@ -58,7 +58,7 @@ func ControlPlanes(cc ClusterConfig) []Node {
func IsPrimaryControlPlane(cc ClusterConfig, node Node) bool {
// TODO (prezha): find where, for "none" driver, we set first (ie, primary control-plane) node name to "m01" - that should not happen but it's happening before pr #17909
// return node.ControlPlane && node.Name == ""
-return cc.Nodes != nil && cc.Nodes[0].Name == node.Name
+return len(cc.Nodes) > 0 && cc.Nodes[0].Name == node.Name
}

// IsValid checks if the profile has the essential info needed for a profile
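This one-line change fixes the weakCond finding quoted in the commit message: cc.Nodes != nil does not guarantee that cc.Nodes[0] exists, because a non-nil slice can still be empty. A minimal sketch of why the len form is the safe one (toy code, not the minikube function):

```go
package main

import "fmt"

// firstName illustrates gocritic's weakCond: checking `names != nil`
// before indexing names[0] is not enough.
func firstName(names []string) string {
	// Weak condition (panics on []string{}):
	//   if names != nil { return names[0] }

	// Robust condition: len covers both nil and empty slices.
	if len(names) > 0 {
		return names[0]
	}
	return ""
}

func main() {
	fmt.Println(firstName(nil))        // ""
	fmt.Println(firstName([]string{})) // "" (no panic)
}
```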
@@ -281,9 +281,9 @@ func (r *Containerd) ListImages(ListImagesOptions) ([]ListImage, error) {
}

// LoadImage loads an image into this runtime
-func (r *Containerd) LoadImage(path string) error {
-klog.Infof("Loading image: %s", path)
-c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "import", path)
+func (r *Containerd) LoadImage(imagePath string) error {
+klog.Infof("Loading image: %s", imagePath)
+c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "import", imagePath)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrapf(err, "ctr images import")
}

@@ -296,9 +296,9 @@ func (r *Containerd) PullImage(name string) error {
}

// SaveImage save an image from this runtime
-func (r *Containerd) SaveImage(name string, path string) error {
-klog.Infof("Saving image %s: %s", name, path)
-c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "export", path, name)
+func (r *Containerd) SaveImage(name string, destPath string) error {
+klog.Infof("Saving image %s: %s", name, destPath)
+c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "export", destPath, name)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrapf(err, "ctr images export")
}

@@ -526,11 +526,11 @@ func (r *Containerd) Preload(cc config.ClusterConfig) error {
cRuntime := cc.KubernetesConfig.ContainerRuntime

// If images already exist, return
-images, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion)
+imgs, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion)
if err != nil {
return errors.Wrap(err, "getting images")
}
-if containerdImagesPreloaded(r.Runner, images) {
+if containerdImagesPreloaded(r.Runner, imgs) {
klog.Info("Images already preloaded, skipping extraction")
return nil
}

@@ -583,7 +583,7 @@ func (r *Containerd) Restart() error {
}

// containerdImagesPreloaded returns true if all images have been preloaded
-func containerdImagesPreloaded(runner command.Runner, images []string) bool {
+func containerdImagesPreloaded(runner command.Runner, imgs []string) bool {
var rr *command.RunResult

imageList := func() (err error) {

@@ -604,7 +604,7 @@ func containerdImagesPreloaded(runner command.Runner, images []string) bool {
}

// Make sure images == imgs
-for _, i := range images {
+for _, i := range imgs {
found := false
for _, ji := range jsonImages.Images {
for _, rt := range ji.RepoTags {

@@ -629,6 +629,6 @@ func containerdImagesPreloaded(runner command.Runner, images []string) bool {
}

// ImagesPreloaded returns true if all images have been preloaded
-func (r *Containerd) ImagesPreloaded(images []string) bool {
-return containerdImagesPreloaded(r.Runner, images)
+func (r *Containerd) ImagesPreloaded(imgs []string) bool {
+return containerdImagesPreloaded(r.Runner, imgs)
}
@@ -271,9 +271,9 @@ func (r *CRIO) ListImages(ListImagesOptions) ([]ListImage, error) {
}

// LoadImage loads an image into this runtime
-func (r *CRIO) LoadImage(path string) error {
-klog.Infof("Loading image: %s", path)
-c := exec.Command("sudo", "podman", "load", "-i", path)
+func (r *CRIO) LoadImage(imgPath string) error {
+klog.Infof("Loading image: %s", imgPath)
+c := exec.Command("sudo", "podman", "load", "-i", imgPath)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "crio load image")
}

@@ -286,9 +286,9 @@ func (r *CRIO) PullImage(name string) error {
}

// SaveImage saves an image from this runtime
-func (r *CRIO) SaveImage(name string, path string) error {
-klog.Infof("Saving image %s: %s", name, path)
-c := exec.Command("sudo", "podman", "save", name, "-o", path)
+func (r *CRIO) SaveImage(name string, destPath string) error {
+klog.Infof("Saving image %s: %s", name, destPath)
+c := exec.Command("sudo", "podman", "save", name, "-o", destPath)
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "crio save image")
}

@@ -425,11 +425,11 @@ func (r *CRIO) Preload(cc config.ClusterConfig) error {
cRuntime := cc.KubernetesConfig.ContainerRuntime

// If images already exist, return
-images, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion)
+imgs, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion)
if err != nil {
return errors.Wrap(err, "getting images")
}
-if crioImagesPreloaded(r.Runner, images) {
+if crioImagesPreloaded(r.Runner, imgs) {
klog.Info("Images already preloaded, skipping extraction")
return nil
}

@@ -477,7 +477,7 @@ func (r *CRIO) Preload(cc config.ClusterConfig) error {
}

// crioImagesPreloaded returns true if all images have been preloaded
-func crioImagesPreloaded(runner command.Runner, images []string) bool {
+func crioImagesPreloaded(runner command.Runner, imgs []string) bool {
rr, err := runner.RunCmd(exec.Command("sudo", "crictl", "images", "--output", "json"))
if err != nil {
return false

@@ -491,7 +491,7 @@ func crioImagesPreloaded(runner command.Runner, images []string) bool {
}

// Make sure images == imgs
-for _, i := range images {
+for _, i := range imgs {
found := false
for _, ji := range jsonImages.Images {
for _, rt := range ji.RepoTags {

@@ -516,6 +516,6 @@ func crioImagesPreloaded(runner command.Runner, images []string) bool {
}

// ImagesPreloaded returns true if all images have been preloaded
-func (r *CRIO) ImagesPreloaded(images []string) bool {
-return crioImagesPreloaded(r.Runner, images)
+func (r *CRIO) ImagesPreloaded(imgs []string) bool {
+return crioImagesPreloaded(r.Runner, imgs)
}
@@ -285,9 +285,9 @@ func (r *Docker) ListImages(ListImagesOptions) ([]ListImage, error) {
Tag string `json:"Tag"`
Size string `json:"Size"`
}
-images := strings.Split(rr.Stdout.String(), "\n")
+imgs := strings.Split(rr.Stdout.String(), "\n")
result := []ListImage{}
-for _, img := range images {
+for _, img := range imgs {
if img == "" {
continue
}

@@ -313,9 +313,9 @@ func (r *Docker) ListImages(ListImagesOptions) ([]ListImage, error) {
}

// LoadImage loads an image into this runtime
-func (r *Docker) LoadImage(path string) error {
-klog.Infof("Loading image: %s", path)
-c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cat %s | docker load", path))
+func (r *Docker) LoadImage(imgPath string) error {
+klog.Infof("Loading image: %s", imgPath)
+c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cat %s | docker load", imgPath))
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "loadimage docker")
}

@@ -336,9 +336,9 @@ func (r *Docker) PullImage(name string) error {
}

// SaveImage saves an image from this runtime
-func (r *Docker) SaveImage(name string, path string) error {
-klog.Infof("Saving image %s: %s", name, path)
-c := exec.Command("/bin/bash", "-c", fmt.Sprintf("docker save '%s' | sudo tee %s >/dev/null", name, path))
+func (r *Docker) SaveImage(name string, imagePath string) error {
+klog.Infof("Saving image %s: %s", name, imagePath)
+c := exec.Command("/bin/bash", "-c", fmt.Sprintf("docker save '%s' | sudo tee %s >/dev/null", name, imagePath))
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "saveimage docker")
}

@@ -594,13 +594,14 @@ func (r *Docker) configureDocker(driver string) error {
StorageDriver: "overlay2",
}

-if r.GPUs == "all" || r.GPUs == "nvidia" {
+switch r.GPUs {
+case "all", "nvidia":
assets.Addons["nvidia-device-plugin"].EnableByDefault()
daemonConfig.DefaultRuntime = "nvidia"
runtimes := &dockerDaemonRuntimes{}
runtimes.Nvidia.Path = "/usr/bin/nvidia-container-runtime"
daemonConfig.Runtimes = runtimes
-} else if r.GPUs == "amd" {
+case "amd":
assets.Addons["amd-gpu-device-plugin"].EnableByDefault()
}

@@ -624,11 +625,11 @@ func (r *Docker) Preload(cc config.ClusterConfig) error {
cRuntime := cc.KubernetesConfig.ContainerRuntime

// If images already exist, return
-images, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion)
+imgs, err := images.Kubeadm(cc.KubernetesConfig.ImageRepository, k8sVersion)
if err != nil {
return errors.Wrap(err, "getting images")
}
-if dockerImagesPreloaded(r.Runner, images) {
+if dockerImagesPreloaded(r.Runner, imgs) {
klog.Info("Images already preloaded, skipping extraction")
return nil
}

@@ -687,7 +688,7 @@ func (r *Docker) Preload(cc config.ClusterConfig) error {
}

// dockerImagesPreloaded returns true if all images have been preloaded
-func dockerImagesPreloaded(runner command.Runner, images []string) bool {
+func dockerImagesPreloaded(runner command.Runner, imgs []string) bool {
rr, err := runner.RunCmd(exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}"))
if err != nil {
klog.Warning(err)

@@ -702,7 +703,7 @@ func dockerImagesPreloaded(runner command.Runner, images []string) bool {
klog.Infof("Got preloaded images: %s", rr.Output())

// Make sure images == imgs
-for _, i := range images {
+for _, i := range imgs {
i = image.TrimDockerIO(i)
if _, ok := preloadedImages[i]; !ok {
klog.Infof("%s wasn't preloaded", i)

@@ -759,8 +760,8 @@ func dockerBoundToContainerd(runner command.Runner) bool {
}

// ImagesPreloaded returns true if all images have been preloaded
-func (r *Docker) ImagesPreloaded(images []string) bool {
-return dockerImagesPreloaded(r.Runner, images)
+func (r *Docker) ImagesPreloaded(imgs []string) bool {
+return dockerImagesPreloaded(r.Runner, imgs)
}

const (
@@ -67,8 +67,8 @@ func LocalISOResource(isoURL string) string {
}

// fileURI returns a file:// URI for a path
-func fileURI(path string) string {
-return "file://" + filepath.ToSlash(path)
+func fileURI(filePath string) string {
+return "file://" + filepath.ToSlash(filePath)
}

// localISOPath returns where an ISO should be stored locally
@@ -250,10 +250,10 @@ func saveChecksumFile(k8sVersion, containerRuntime string, checksum []byte) erro

// verifyChecksum returns true if the checksum of the local binary matches
// the checksum of the remote binary
-func verifyChecksum(k8sVersion, containerRuntime, path string) error {
-klog.Infof("verifying checksum of %s ...", path)
+func verifyChecksum(k8sVersion, containerRuntime, binaryPath string) error {
+klog.Infof("verifying checksum of %s ...", binaryPath)
// get md5 checksum of tarball path
-contents, err := os.ReadFile(path)
+contents, err := os.ReadFile(binaryPath)
if err != nil {
return errors.Wrap(err, "reading tarball")
}

@@ -266,7 +266,7 @@ func verifyChecksum(k8sVersion, containerRuntime, path string) error {

// create a slice of checksum, which is [16]byte
if string(remoteChecksum) != string(checksum[:]) {
-return fmt.Errorf("checksum of %s does not match remote checksum (%s != %s)", path, string(remoteChecksum), string(checksum[:]))
+return fmt.Errorf("checksum of %s does not match remote checksum (%s != %s)", binaryPath, string(remoteChecksum), string(checksum[:]))
}
return nil
}
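For readers following the checksum logic, a compact, self-contained sketch of the same verify-by-digest pattern; the file name and expected digest here are invented for illustration:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"os"
)

// verify hashes the local file and compares it to an expected digest,
// mirroring the structure of verifyChecksum above.
func verify(path string, want []byte) error {
	contents, err := os.ReadFile(path)
	if err != nil {
		return fmt.Errorf("reading file: %w", err)
	}
	got := md5.Sum(contents) // [16]byte
	if !bytes.Equal(got[:], want) {
		return fmt.Errorf("checksum of %s does not match (%x != %x)", path, got, want)
	}
	return nil
}

func main() {
	want := md5.Sum([]byte("hello"))
	_ = os.WriteFile("/tmp/example.bin", []byte("hello"), 0o644)
	fmt.Println(verify("/tmp/example.bin", want[:])) // <nil>
}
```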
@@ -159,7 +159,7 @@ func extractDriverVersion(s string) string {
return strings.TrimPrefix(v, "v")
}

-func driverExists(driver string) bool {
-_, err := exec.LookPath(driver)
+func driverExists(driverName string) bool {
+_, err := exec.LookPath(driverName)
return err == nil
}
@@ -38,28 +38,28 @@ func IsBootpdBlocked(cc config.ClusterConfig) bool {
if cc.Driver != driver.QEMU2 || runtime.GOOS != "darwin" || cc.Network != "socket_vmnet" {
return false
}
-out, err := exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getglobalstate").Output()
+rest, err := exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getglobalstate").Output()
if err != nil {
klog.Warningf("failed to get firewall state: %v", err)
return false
}
-if regexp.MustCompile(`Firewall is disabled`).Match(out) {
+if regexp.MustCompile(`Firewall is disabled`).Match(rest) {
return false
}
-out, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getallowsigned").Output()
+rest, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--getallowsigned").Output()
if err != nil {
// macOS < 15 or other issue: need to use --list.
klog.Warningf("failed to list firewall allowedsinged option: %v", err)
// macOS >= 15: bootpd may be allowed as builtin software
-} else if regexp.MustCompile(`Automatically allow built-in signed software ENABLED`).Match(out) {
+} else if regexp.MustCompile(`Automatically allow built-in signed software ENABLED`).Match(rest) {
return false
}
-out, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--listapps").Output()
+rest, err = exec.Command("/usr/libexec/ApplicationFirewall/socketfilterfw", "--listapps").Output()
if err != nil {
klog.Warningf("failed to list firewall apps: %v", err)
return false
}
-return !regexp.MustCompile(`\/usr\/libexec\/bootpd.*\n.*\( Allow`).Match(out)
+return !regexp.MustCompile(`\/usr\/libexec\/bootpd.*\n.*\( Allow`).Match(rest)
}

// UnblockBootpd adds bootpd to the built-in macOS firewall and then unblocks it
@@ -340,6 +340,6 @@ func normalizeTagName(image string) string {

// Remove docker.io prefix since it won't be included in image names
// when we call `docker images`.
-func TrimDockerIO(name string) string {
-return strings.TrimPrefix(name, "docker.io/")
+func TrimDockerIO(imageName string) string {
+return strings.TrimPrefix(imageName, "docker.io/")
}
@@ -184,9 +184,9 @@ func replaceWinDriveLetterToVolumeName(s string) (string, error) {
if err != nil {
return "", err
}
-path := vname + s[3:]
+p := vname + s[3:]

-return path, nil
+return p, nil
}

func getWindowsVolumeNameCmd(d string) (string, error) {
@@ -42,7 +42,7 @@ import (
var buildRoot = path.Join(vmpath.GuestPersistentDir, "build")

// BuildImage builds image to all profiles
-func BuildImage(path string, file string, tag string, push bool, env []string, opt []string, profiles []*config.Profile, allNodes bool, nodeName string) error {
+func BuildImage(srcPath string, file string, tag string, push bool, env []string, opt []string, profiles []*config.Profile, allNodes bool, nodeName string) error {
api, err := NewAPIClient()
if err != nil {
return errors.Wrap(err, "api")

@@ -52,12 +52,12 @@ func BuildImage(path string, file string, tag string, push bool, env []string, o
succeeded := []string{}
failed := []string{}

-u, err := url.Parse(path)
+u, err := url.Parse(srcPath)
if err == nil && u.Scheme == "file" {
-path = u.Path
+srcPath = u.Path
}
remote := err == nil && u.Scheme != ""
-if runtime.GOOS == "windows" && filepath.VolumeName(path) != "" {
+if runtime.GOOS == "windows" && filepath.VolumeName(srcPath) != "" {
remote = false
}

@@ -116,9 +116,9 @@ func BuildImage(path string, file string, tag string, push bool, env []string, o
return err
}
if remote {
-err = buildImage(cr, c.KubernetesConfig, path, file, tag, push, env, opt)
+err = buildImage(cr, c.KubernetesConfig, srcPath, file, tag, push, env, opt)
} else {
-err = transferAndBuildImage(cr, c.KubernetesConfig, path, file, tag, push, env, opt)
+err = transferAndBuildImage(cr, c.KubernetesConfig, srcPath, file, tag, push, env, opt)
}
if err != nil {
failed = append(failed, m)
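The url.Parse logic above decides whether the build context is remote. A small sketch of that decision with toy inputs; note the Windows guard, since a drive letter such as C: would otherwise parse as a URL scheme:

```go
package main

import (
	"fmt"
	"net/url"
	"path/filepath"
	"runtime"
)

// isRemote treats a parseable URL with a scheme as remote, except that
// on Windows a path like `C:\ctx` also parses with scheme "c", so a
// drive letter forces local. Illustration only, not the minikube code.
func isRemote(src string) bool {
	u, err := url.Parse(src)
	remote := err == nil && u.Scheme != ""
	if runtime.GOOS == "windows" && filepath.VolumeName(src) != "" {
		remote = false // "C:" is a drive letter, not a URL scheme
	}
	return remote
}

func main() {
	fmt.Println(isRemote("https://example.com/ctx.tar")) // true
	fmt.Println(isRemote("./app"))                       // false
}
```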
@@ -73,19 +73,19 @@ func CacheImagesForBootstrapper(imageRepository, version string) error {
}

// LoadCachedImages loads previously cached images into the container runtime
-func LoadCachedImages(cc *config.ClusterConfig, runner command.Runner, images []string, cacheDir string, overwrite bool) error {
+func LoadCachedImages(cc *config.ClusterConfig, runner command.Runner, imgs []string, cacheDir string, overwrite bool) error {
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil {
return errors.Wrap(err, "runtime")
}

// Skip loading images if images already exist
-if !overwrite && cr.ImagesPreloaded(images) {
+if !overwrite && cr.ImagesPreloaded(imgs) {
klog.Infof("Images are preloaded, skipping loading")
return nil
}

-klog.Infof("LoadCachedImages start: %s", images)
+klog.Infof("LoadCachedImages start: %s", imgs)
start := time.Now()

defer func() {

@@ -102,19 +102,19 @@ func LoadCachedImages(cc *config.ClusterConfig, runner command.Runner, images []
}
}

-for _, image := range images {
-image := image
+for _, img := range imgs {
+img := img
g.Go(func() error {
// Put a ten second limit on deciding if an image needs transfer
// because it takes much less than that time to just transfer the image.
// This is needed because if running in offline mode, we can spend minutes here
// waiting for i/o timeout.
-err := timedNeedsTransfer(imgClient, image, cr, 10*time.Second)
+err := timedNeedsTransfer(imgClient, img, cr, 10*time.Second)
if err == nil {
return nil
}
-klog.Infof("%q needs transfer: %v", image, err)
-return transferAndLoadCachedImage(runner, cc.KubernetesConfig, image, cacheDir)
+klog.Infof("%q needs transfer: %v", img, err)
+return transferAndLoadCachedImage(runner, cc.KubernetesConfig, img, cacheDir)
})
}
if err := g.Wait(); err != nil {

@@ -172,10 +172,10 @@ func needsTransfer(imgClient *client.Client, imgName string, cr cruntime.Manager
// LoadLocalImages loads images into the container runtime
func LoadLocalImages(cc *config.ClusterConfig, runner command.Runner, images []string) error {
var g errgroup.Group
-for _, image := range images {
-image := image
+for _, img := range images {
+img := img
g.Go(func() error {
-return transferAndLoadImage(runner, cc.KubernetesConfig, image, image)
+return transferAndLoadImage(runner, cc.KubernetesConfig, img, img)
})
}
if err := g.Wait(); err != nil {

@@ -353,10 +353,10 @@ func SaveCachedImages(cc *config.ClusterConfig, runner command.Runner, images []

var g errgroup.Group

-for _, image := range images {
-image := image
+for _, img := range images {
+img := img
g.Go(func() error {
-return transferAndSaveCachedImage(runner, cc.KubernetesConfig, image, cacheDir)
+return transferAndSaveCachedImage(runner, cc.KubernetesConfig, img, cacheDir)
})
}
if err := g.Wait(); err != nil {

@@ -369,10 +369,10 @@ func SaveCachedImages(cc *config.ClusterConfig, runner command.Runner, images []
// SaveLocalImages saves images from the container runtime
func SaveLocalImages(cc *config.ClusterConfig, runner command.Runner, images []string, output string) error {
var g errgroup.Group
-for _, image := range images {
-image := image
+for _, img := range images {
+img := img
g.Go(func() error {
-return transferAndSaveImage(runner, cc.KubernetesConfig, output, image)
+return transferAndSaveImage(runner, cc.KubernetesConfig, output, img)
})
}
if err := g.Wait(); err != nil {

@@ -527,8 +527,8 @@ func transferAndSaveImage(cr command.Runner, k8s config.KubernetesConfig, dst st
}

// pullImages pulls images to the container run time
-func pullImages(cruntime cruntime.Manager, images []string) error {
-klog.Infof("pullImages start: %s", images)
+func pullImages(crMgr cruntime.Manager, imgs []string) error {
+klog.Infof("pullImages start: %s", imgs)
start := time.Now()

defer func() {

@@ -537,10 +537,10 @@ func pullImages(cruntime cruntime.Manager, images []string) error {

var g errgroup.Group

-for _, image := range images {
-image := image
+for _, img := range imgs {
+img := img
g.Go(func() error {
-return cruntime.PullImage(image)
+return crMgr.PullImage(img)
})
}
if err := g.Wait(); err != nil {

@@ -588,11 +588,11 @@ func PullImages(images []string, profile *config.Profile) error {
if err != nil {
return err
}
-cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
+crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil {
return errors.Wrap(err, "error creating container runtime")
}
-err = pullImages(cruntime, images)
+err = pullImages(crMgr, images)
if err != nil {
failed = append(failed, m)
klog.Warningf("Failed to pull images for profile %s %v", pName, err.Error())

@@ -608,8 +608,8 @@ func PullImages(images []string, profile *config.Profile) error {
}

// removeImages removes images from the container run time
-func removeImages(cruntime cruntime.Manager, images []string) error {
-klog.Infof("removeImages start: %s", images)
+func removeImages(crMgr cruntime.Manager, imgs []string) error {
+klog.Infof("removeImages start: %s", imgs)
start := time.Now()

defer func() {

@@ -618,10 +618,10 @@ func removeImages(cruntime cruntime.Manager, images []string) error {

var g errgroup.Group

-for _, image := range images {
-image := image
+for _, img := range imgs {
+img := img
g.Go(func() error {
-return cruntime.RemoveImage(image)
+return crMgr.RemoveImage(img)
})
}
if err := g.Wait(); err != nil {

@@ -669,11 +669,11 @@ func RemoveImages(images []string, profile *config.Profile) error {
if err != nil {
return err
}
-cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
+crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil {
return errors.Wrap(err, "error creating container runtime")
}
-err = removeImages(cruntime, images)
+err = removeImages(crMgr, images)
if err != nil {
failed = append(failed, m)
klog.Warningf("Failed to remove images for profile %s %v", pName, err.Error())

@@ -757,19 +757,19 @@ func ListImages(profile *config.Profile, format string) error {
}
renderImagesTable(data)
case "json":
-json, err := json.Marshal(uniqueImages)
+jsondata, err := json.Marshal(uniqueImages)
if err != nil {
klog.Warningf("Error marshalling images list: %v", err.Error())
return nil
}
-fmt.Printf("%s\n", json)
+fmt.Printf("%s\n", jsondata)
case "yaml":
-yaml, err := yaml.Marshal(uniqueImages)
+yamldata, err := yaml.Marshal(uniqueImages)
if err != nil {
klog.Warningf("Error marshalling images list: %v", err.Error())
return nil
}
-fmt.Printf("%s\n", yaml)
+fmt.Printf("%s\n", yamldata)
default:
res := []string{}
for _, item := range uniqueImages {

@@ -892,11 +892,11 @@ func TagImage(profile *config.Profile, source string, target string) error {
if err != nil {
return err
}
-cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
+crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil {
return errors.Wrap(err, "error creating container runtime")
}
-err = cruntime.TagImage(source, target)
+err = crMgr.TagImage(source, target)
if err != nil {
failed = append(failed, m)
klog.Warningf("Failed to tag image for profile %s %v", pName, err.Error())

@@ -912,8 +912,8 @@ func TagImage(profile *config.Profile, source string, target string) error {
}

// pushImages pushes images from the container run time
-func pushImages(cruntime cruntime.Manager, images []string) error {
-klog.Infof("pushImages start: %s", images)
+func pushImages(crMgr cruntime.Manager, imgs []string) error {
+klog.Infof("pushImages start: %s", imgs)
start := time.Now()

defer func() {

@@ -922,10 +922,10 @@ func pushImages(cruntime cruntime.Manager, images []string) error {

var g errgroup.Group

-for _, image := range images {
-image := image
+for _, img := range imgs {
+img := img
g.Go(func() error {
-return cruntime.PushImage(image)
+return crMgr.PushImage(img)
})
}
if err := g.Wait(); err != nil {

@@ -973,11 +973,11 @@ func PushImages(images []string, profile *config.Profile) error {
if err != nil {
return err
}
-cruntime, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
+crMgr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil {
return errors.Wrap(err, "error creating container runtime")
}
-err = pushImages(cruntime, images)
+err = pushImages(crMgr, images)
if err != nil {
failed = append(failed, m)
klog.Warningf("Failed to push image for profile %s %v", pName, err.Error())
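The img := img statements kept by this refactor are the classic pre-Go 1.22 loop-variable capture: each errgroup goroutine must see its own copy of the range variable, otherwise all goroutines could observe the final iteration's value. A minimal sketch with made-up image names:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// pullAll launches one goroutine per image. Before Go 1.22 the range
// variable was reused across iterations, so each closure must capture
// a per-iteration copy.
func pullAll(imgs []string) error {
	var g errgroup.Group
	for _, img := range imgs {
		img := img // pin this iteration's value for the closure
		g.Go(func() error {
			fmt.Println("pulling", img)
			return nil
		})
	}
	return g.Wait()
}

func main() {
	_ = pullAll([]string{"kicbase:v0.0.42", "storage-provisioner:v5"})
}
```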
@@ -37,7 +37,7 @@ import (
// deleteOrphanedKIC attempts to delete an orphaned docker instance for machines without a config file
// used as last effort clean up not returning errors, won't warn user.
func deleteOrphanedKIC(ociBin string, name string) {
-if !(ociBin == oci.Podman || ociBin == oci.Docker) {
+if ociBin != oci.Podman && ociBin != oci.Docker {
return
}

@@ -68,8 +68,8 @@ func DeleteHost(api libmachine.API, machineName string, deleteAbandoned ...bool)
delAbandoned = deleteAbandoned[0]
}

-host, err := api.Load(machineName)
-if err != nil && host == nil && delAbandoned {
+hostInfo, err := api.Load(machineName)
+if err != nil && hostInfo == nil && delAbandoned {
deleteOrphanedKIC(oci.Docker, machineName)
deleteOrphanedKIC(oci.Podman, machineName)
// Keep going even if minikube does not know about the host

@@ -88,7 +88,7 @@ func DeleteHost(api libmachine.API, machineName string, deleteAbandoned ...bool)
}

// some drivers need manual shut down before delete to avoid getting stuck.
-if driver.NeedsShutdown(host.Driver.DriverName()) {
+if driver.NeedsShutdown(hostInfo.Driver.DriverName()) {
if err := StopHost(api, machineName); err != nil {
klog.Warningf("stop host: %v", err)
}

@@ -96,8 +96,8 @@ func DeleteHost(api libmachine.API, machineName string, deleteAbandoned ...bool)
time.Sleep(1 * time.Second)
}

-out.Step(style.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName})
-return deleteHost(api, host, machineName)
+out.Step(style.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": hostInfo.DriverName})
+return deleteHost(api, hostInfo, machineName)
}

// delete removes a host and its local data files
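The deleteOrphanedKIC change is the QF1001 fix (apply De Morgan's law): !(a || b) becomes !a && !b, which drops the outer negation and reads directly. A runnable sketch with toy binary names:

```go
package main

import "fmt"

// isSupported shows the equivalence used above:
//   !(bin == "podman" || bin == "docker")
// is the same condition as
//   bin != "podman" && bin != "docker"
func isSupported(bin string) bool {
	if bin != "podman" && bin != "docker" {
		return false
	}
	return true
}

func main() {
	fmt.Println(isSupported("docker")) // true
	fmt.Println(isSupported("crio"))   // false
}
```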
@@ -209,12 +209,12 @@ func ensureSyncedGuestClock(h hostRunner, drv string) error {
// guestClockDelta returns the approximate difference between the host and guest system clock
// NOTE: This does not currently take into account ssh latency.
func guestClockDelta(h hostRunner, local time.Time) (time.Duration, error) {
-out, err := h.RunSSHCommand("date +%s.%N")
+rest, err := h.RunSSHCommand("date +%s.%N")
if err != nil {
return 0, errors.Wrap(err, "get clock")
}
-klog.Infof("guest clock: %s", out)
-ns := strings.Split(strings.TrimSpace(out), ".")
+klog.Infof("guest clock: %s", rest)
+ns := strings.Split(strings.TrimSpace(rest), ".")
secs, err := strconv.ParseInt(strings.TrimSpace(ns[0]), 10, 64)
if err != nil {
return 0, errors.Wrap(err, "atoi")

@@ -232,8 +232,8 @@ func guestClockDelta(h hostRunner, local time.Time) (time.Duration, error) {

// adjustGuestClock adjusts the guest system clock to be nearer to the host system clock
func adjustGuestClock(h hostRunner, t time.Time) error {
-out, err := h.RunSSHCommand(fmt.Sprintf("sudo date -s @%d", t.Unix()))
-klog.Infof("clock set: %s (err=%v)", out, err)
+rest, err := h.RunSSHCommand(fmt.Sprintf("sudo date -s @%d", t.Unix()))
+klog.Infof("clock set: %s (err=%v)", rest, err)
return err
}

@@ -253,10 +253,12 @@ func machineExistsMessage(s state.State, err error, msg string) (bool, error) {
}

func machineExistsDocker(s state.State, err error) (bool, error) {
-if s == state.Error {
+
+switch s {
+case state.Error:
// if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return
return false, constants.ErrMachineMissing
-} else if s == state.None {
+case state.None:
// if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return
return false, constants.ErrMachineMissing
}
@@ -35,12 +35,12 @@ func Status(api libmachine.API, machineName string) (string, error) {
return state.None.String(), nil
}

-host, err := api.Load(machineName)
+hostInfo, err := api.Load(machineName)
if err != nil {
return "", errors.Wrapf(err, "load")
}

-s, err := host.Driver.GetState()
+s, err := hostInfo.Driver.GetState()
if err != nil {
return "", errors.Wrap(err, "state")
}
@@ -193,11 +193,11 @@ func cachedCPUInfo() ([]cpu.InfoStat, error) {
}

// ParseMemFree parses the output of the `free -m` command
-func parseMemFree(out string) (int64, error) {
+func parseMemFree(s string) (int64, error) {
// total used free shared buff/cache available
//Mem: 1987 706 194 1 1086 1173
//Swap: 0 0 0
-outlines := strings.Split(out, "\n")
+outlines := strings.Split(s, "\n")
l := len(outlines)
for _, line := range outlines[1 : l-1] {
parsedLine := strings.Fields(line)

@@ -217,10 +217,10 @@ func parseMemFree(out string) (int64, error) {
}

// ParseDiskFree parses the output of the `df -m` command
-func parseDiskFree(out string) (int64, error) {
+func parseDiskFree(s string) (int64, error) {
// Filesystem 1M-blocks Used Available Use% Mounted on
// /dev/sda1 39643 3705 35922 10% /
-outlines := strings.Split(out, "\n")
+outlines := strings.Split(s, "\n")
l := len(outlines)
for _, line := range outlines[1 : l-1] {
parsedLine := strings.Fields(line)
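parseMemFree and parseDiskFree both split command output into lines and then whitespace-delimited fields. A reduced sketch of that approach against canned `free -m` output; the column index is assumed for illustration:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// memFree scans each line of `free -m` output for the "Mem:" row and
// parses its "free" column.
func memFree(out string) (int64, error) {
	for _, line := range strings.Split(out, "\n") {
		fields := strings.Fields(line)
		if len(fields) >= 4 && fields[0] == "Mem:" {
			return strconv.ParseInt(fields[3], 10, 64) // "free" column
		}
	}
	return 0, fmt.Errorf("no Mem: line found")
}

func main() {
	sample := "              total        used        free\nMem:           1987         706         194\n"
	fmt.Println(memFree(sample)) // 194 <nil>
}
```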
@@ -31,12 +31,12 @@ import (
// GetHost find node's host information by name in the given cluster.
func GetHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) {
machineName := config.MachineName(cc, n)
-host, err := LoadHost(api, machineName)
+hostInfo, err := LoadHost(api, machineName)
if err != nil {
return nil, errors.Wrap(err, "host exists and load")
}

-currentState, err := host.Driver.GetState()
+currentState, err := hostInfo.Driver.GetState()
if err != nil {
return nil, errors.Wrap(err, "state")
}

@@ -45,12 +45,12 @@ func GetHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
return nil, errors.Errorf("%q is not running", machineName)
}

-return host, nil
+return hostInfo, nil
}

// CreateSSHShell creates a new SSH shell / client
func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node, args []string, native bool) error {
-host, err := GetHost(api, cc, n)
+hostInfo, err := GetHost(api, cc, n)
if err != nil {
return err
}

@@ -61,7 +61,7 @@ func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node,
ssh.SetDefaultClient(ssh.External)
}

-client, err := host.CreateSSHClient()
+client, err := hostInfo.CreateSSHClient()

if err != nil {
return errors.Wrap(err, "Creating ssh client")

@@ -71,16 +71,16 @@ func CreateSSHShell(api libmachine.API, cc config.ClusterConfig, n config.Node,

// GetSSHHostAddrPort returns the host address and port for ssh
func GetSSHHostAddrPort(api libmachine.API, cc config.ClusterConfig, n config.Node) (string, int, error) {
-host, err := GetHost(api, cc, n)
+hostInfo, err := GetHost(api, cc, n)
if err != nil {
return "", 0, err
}

-addr, err := host.Driver.GetSSHHostname()
+addr, err := hostInfo.Driver.GetSSHHostname()
if err != nil {
return "", 0, err
}
-port, err := host.Driver.GetSSHPort()
+port, err := hostInfo.Driver.GetSSHPort()
if err != nil {
return "", 0, err
}
@@ -412,7 +412,7 @@ func AddHostAlias(c command.Runner, name string, ip net.IP) error {
return nil
}

-func addHostAliasCommand(name string, record string, sudo bool, path string) *exec.Cmd {
+func addHostAliasCommand(name string, record string, sudo bool, destPath string) *exec.Cmd {
sudoCmd := "sudo"
if !sudo { // for testing
sudoCmd = ""

@@ -421,9 +421,9 @@ func addHostAliasCommand(name string, record string, sudo bool, path string) *ex
script := fmt.Sprintf(
`{ grep -v $'\t%s$' "%s"; echo "%s"; } > /tmp/h.$$; %s cp /tmp/h.$$ "%s"`,
name,
-path,
+destPath,
record,
sudoCmd,
-path)
+destPath)
return exec.Command("/bin/bash", "-c", script)
}
@@ -95,9 +95,9 @@ func trySSHPowerOff(h *host.Host) error {
err := oci.ShutDown(h.DriverName, h.Name)
klog.Infof("shutdown container: err=%v", err)
} else {
-out, err := h.RunSSHCommand("sudo poweroff")
+rest, err := h.RunSSHCommand("sudo poweroff")
// poweroff always results in an error, since the host disconnects.
-klog.Infof("poweroff result: out=%s, err=%v", out, err)
+klog.Infof("poweroff result: out=%s, err=%v", rest, err)
}
return nil
}
@@ -132,7 +132,7 @@ func running(name string, first bool) []ClusterController {
continue
}

-host, err := machine.LoadHost(api, machineName)
+hostInfo, err := machine.LoadHost(api, machineName)
if err != nil {
if last {
exit.Message(reason.GuestLoadHost, `Unable to load control-plane node {{.name}} host: {{.err}}`, out.V{"name": machineName, "err": err})

@@ -141,7 +141,7 @@ func running(name string, first bool) []ClusterController {
continue
}

-cr, err := machine.CommandRunner(host)
+cr, err := machine.CommandRunner(hostInfo)
if err != nil {
if last {
exit.Message(reason.InternalCommandRunner, `Unable to get control-plane node {{.name}} host command runner: {{.err}}`, out.V{"name": machineName, "err": err})

@@ -150,7 +150,7 @@ func running(name string, first bool) []ClusterController {
continue
}

-hostname, ip, port, err := driver.ControlPlaneEndpoint(cc, &cp, host.DriverName)
+hostname, ip, port, err := driver.ControlPlaneEndpoint(cc, &cp, hostInfo.DriverName)
if err != nil {
if last {
exit.Message(reason.DrvCPEndpoint, `Unable to get control-plane node {{.name}} endpoint: {{.err}}`, out.V{"name": machineName, "err": err})

@@ -164,7 +164,7 @@ func running(name string, first bool) []ClusterController {
Config: cc,
CP: ControlPlane{
Runner: cr,
-Host: host,
+Host: hostInfo,
Node: &cp,
Hostname: hostname,
IP: ip,

@@ -223,8 +223,8 @@ func Healthy(name string) ClusterController {

// exitTip returns an action tip and exits
func exitTip(action string, profile string, code int) {
-command := ExampleCmd(profile, action)
-out.Styled(style.Workaround, `To start a cluster, run: "{{.command}}"`, out.V{"command": command})
+cmd := ExampleCmd(profile, action)
+out.Styled(style.Workaround, `To start a cluster, run: "{{.command}}"`, out.V{"command": cmd})
exit.Code(code)
}
@@ -282,12 +282,12 @@ func imagesInConfigFile() ([]string, error) {
 }
 
 func updateKicImageRepo(imgName string, repo string) string {
-	image := strings.TrimPrefix(imgName, "gcr.io/")
+	imageName := strings.TrimPrefix(imgName, "gcr.io/")
 	if repo == constants.AliyunMirror {
 		// for aliyun registry must strip namespace from image name, e.g.
 		// registry.cn-hangzhou.aliyuncs.com/google_containers/k8s-minikube/kicbase:v0.0.25 will not work
 		// registry.cn-hangzhou.aliyuncs.com/google_containers/kicbase:v0.0.25 does work
-		image = strings.TrimPrefix(image, "k8s-minikube/")
+		imageName = strings.TrimPrefix(imageName, "k8s-minikube/")
 	}
-	return path.Join(repo, image)
+	return path.Join(repo, imageName)
 }
@@ -522,8 +522,8 @@ func cgroupDriver(cc config.ClusterConfig) string {
 	return detect.CgroupDriver()
 }
 
-func pathExists(runner cruntime.CommandRunner, path string) (bool, error) {
-	_, err := runner.RunCmd(exec.Command("stat", path))
+func pathExists(runner cruntime.CommandRunner, p string) (bool, error) {
+	_, err := runner.RunCmd(exec.Command("stat", p))
 	if err == nil {
 		return true, nil
 	}
@@ -624,18 +624,18 @@ func setupKubeadm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node,
 
 // setupKubeconfig generates kubeconfig.
 func setupKubeconfig(h host.Host, cc config.ClusterConfig, n config.Node, clusterName string) *kubeconfig.Settings {
-	host := cc.KubernetesConfig.APIServerHAVIP
+	hostIP := cc.KubernetesConfig.APIServerHAVIP
 	port := cc.APIServerPort
 	if !config.IsHA(cc) || driver.NeedsPortForward(cc.Driver) {
 		var err error
-		if host, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, h.DriverName); err != nil {
+		if hostIP, _, port, err = driver.ControlPlaneEndpoint(&cc, &n, h.DriverName); err != nil {
 			exit.Message(reason.DrvCPEndpoint, fmt.Sprintf("failed to construct cluster server address: %v", err), out.V{"profileArg": fmt.Sprintf("--profile=%s", clusterName)})
 		}
 	}
-	addr := fmt.Sprintf("https://%s", net.JoinHostPort(host, strconv.Itoa(port)))
+	addr := fmt.Sprintf("https://%s", net.JoinHostPort(hostIP, strconv.Itoa(port)))
 
 	if cc.KubernetesConfig.APIServerName != constants.APIServerName {
-		addr = strings.ReplaceAll(addr, host, cc.KubernetesConfig.APIServerName)
+		addr = strings.ReplaceAll(addr, hostIP, cc.KubernetesConfig.APIServerName)
 	}
 
 	kcs := &kubeconfig.Settings{
@@ -654,29 +654,29 @@ func setupKubeconfig(h host.Host, cc config.ClusterConfig, n config.Node, cluste
 }
 
 // StartMachine starts a VM
-func startMachine(cfg *config.ClusterConfig, node *config.Node, delOnFail bool) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host, err error) {
+func startMachine(cfg *config.ClusterConfig, node *config.Node, delOnFail bool) (runner command.Runner, preExists bool, machineAPI libmachine.API, hostInfo *host.Host, err error) {
 	m, err := machine.NewAPIClient()
 	if err != nil {
-		return runner, preExists, m, host, errors.Wrap(err, "Failed to get machine client")
+		return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to get machine client")
 	}
-	host, preExists, err = startHostInternal(m, cfg, node, delOnFail)
+	hostInfo, preExists, err = startHostInternal(m, cfg, node, delOnFail)
 	if err != nil {
-		return runner, preExists, m, host, errors.Wrap(err, "Failed to start host")
+		return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to start host")
 	}
-	runner, err = machine.CommandRunner(host)
+	runner, err = machine.CommandRunner(hostInfo)
 	if err != nil {
-		return runner, preExists, m, host, errors.Wrap(err, "Failed to get command runner")
+		return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to get command runner")
 	}
 
-	ip, err := validateNetwork(host, runner, cfg.KubernetesConfig.ImageRepository)
+	ip, err := validateNetwork(hostInfo, runner, cfg.KubernetesConfig.ImageRepository)
 	if err != nil {
-		return runner, preExists, m, host, errors.Wrap(err, "Failed to validate network")
+		return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to validate network")
 	}
 
-	if driver.IsQEMU(host.Driver.DriverName()) && network.IsBuiltinQEMU(cfg.Network) {
+	if driver.IsQEMU(hostInfo.Driver.DriverName()) && network.IsBuiltinQEMU(cfg.Network) {
 		apiServerPort, err := getPort()
 		if err != nil {
-			return runner, preExists, m, host, errors.Wrap(err, "Failed to find apiserver port")
+			return runner, preExists, m, hostInfo, errors.Wrap(err, "Failed to find apiserver port")
 		}
 		cfg.APIServerPort = apiServerPort
 	}
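
In startMachine the shadow sat in the signature itself: the named return value host *host.Host hid the host package inside the entire function body. A hedged sketch of the same pattern, with the standard library's net/url standing in for the host package:

package main

import (
	"fmt"
	"net/url"
)

// The named result "u" leaves the url package usable inside the body.
// Had the result been named "url", as in
//	func parse(raw string) (url *url.URL, err error)
// the body could not have mentioned the url package at all, which is the
// position startMachine was in with the host package.
func parse(raw string) (u *url.URL, err error) {
	u, err = url.Parse(raw) // ok: "url" still refers to the package
	return u, err
}

func main() {
	u, err := parse("https://example.com")
	fmt.Println(u, err)
}
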
@@ -687,7 +687,7 @@ func startMachine(cfg *config.ClusterConfig, node *config.Node, delOnFail bool)
 		out.FailureT("Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", out.V{"ip": ip})
 	}
 
-	return runner, preExists, m, host, err
+	return runner, preExists, m, hostInfo, err
 }
 
 // getPort asks the kernel for a free open port that is ready to use
@@ -707,9 +707,9 @@ func getPort() (int, error) {
 
 // startHostInternal starts a new minikube host using a VM or None
 func startHostInternal(api libmachine.API, cc *config.ClusterConfig, n *config.Node, delOnFail bool) (*host.Host, bool, error) {
-	host, exists, err := machine.StartHost(api, cc, n)
+	hostInfo, exists, err := machine.StartHost(api, cc, n)
 	if err == nil {
-		return host, exists, nil
+		return hostInfo, exists, nil
 	}
 	klog.Warningf("error starting host: %v", err)
 	// NOTE: People get very cranky if you delete their preexisting VM. Only delete new ones.
@@ -722,7 +722,7 @@ func startHostInternal(api libmachine.API, cc *config.ClusterConfig, n *config.N
 
 	if err, ff := errors.Cause(err).(*oci.FailFastError); ff {
 		klog.Infof("will skip retrying to create machine because error is not retriable: %v", err)
-		return host, exists, err
+		return hostInfo, exists, err
 	}
 
 	out.ErrT(style.Embarrassed, "StartHost failed, but will try again: {{.error}}", out.V{"error": err})
@@ -739,15 +739,15 @@ func startHostInternal(api libmachine.API, cc *config.ClusterConfig, n *config.N
 		}
 	}
 
-	host, exists, err = machine.StartHost(api, cc, n)
+	hostInfo, exists, err = machine.StartHost(api, cc, n)
 	if err == nil {
-		return host, exists, nil
+		return hostInfo, exists, nil
 	}
 
 	// Don't use host.Driver to avoid nil pointer deref
 	drv := cc.Driver
 	out.ErrT(style.Sad, `Failed to start {{.driver}} {{.driver_type}}. Running "{{.cmd}}" may fix it: {{.error}}`, out.V{"driver": drv, "driver_type": driver.MachineType(drv), "cmd": mustload.ExampleCmd(cc.Name, "delete"), "error": err})
-	return host, exists, err
+	return hostInfo, exists, err
 }
 
 // validateNetwork tries to catch network problems as soon as possible
@@ -760,7 +760,8 @@ func validateNetwork(h *host.Host, r command.Runner, imageRepository string) (st
 	optSeen := false
 	warnedOnce := false
 	for _, k := range proxy.EnvVars {
-		if v := os.Getenv(k); v != "" {
+		v := os.Getenv(k)
+		if v != "" {
 			if !optSeen {
 				out.Styled(style.Internet, "Found network options:")
 				optSeen = true
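
The validateNetwork hunk splits the if init clause into a separate declaration. Behavior is unchanged apart from scope: after the split, v remains visible below the if block. A minimal sketch of the two equivalent forms, using only the standard library:

package main

import (
	"fmt"
	"os"
)

func main() {
	// combined form: v is scoped to the if statement only
	if v := os.Getenv("HTTPS_PROXY"); v != "" {
		fmt.Println("proxy:", v)
	}

	// split form, as in the hunk above: v outlives the if block
	v := os.Getenv("HTTPS_PROXY")
	if v != "" {
		fmt.Println("proxy:", v)
	}
}
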
@@ -847,9 +848,9 @@ func tryRegistry(r command.Runner, driverName, imageRepository, ip string) {
 	// 2 second timeout. For best results, call tryRegistry in a non-blocking manner.
 	opts := []string{"-sS", "-m", "2"}
 
-	proxy := os.Getenv("HTTPS_PROXY")
-	if proxy != "" && !strings.HasPrefix(proxy, "localhost") && !strings.HasPrefix(proxy, "127.0") {
-		opts = append([]string{"-x", proxy}, opts...)
+	httpsProxy := os.Getenv("HTTPS_PROXY")
+	if httpsProxy != "" && !strings.HasPrefix(httpsProxy, "localhost") && !strings.HasPrefix(httpsProxy, "127.0") {
+		opts = append([]string{"-x", httpsProxy}, opts...)
 	}
 
 	if imageRepository == "" {
@@ -931,16 +932,16 @@ func addCoreDNSEntry(runner command.Runner, name, ip string, cc config.ClusterCo
 
 	// get current coredns configmap via kubectl
 	get := fmt.Sprintf("sudo %s --kubeconfig=%s -n kube-system get configmap coredns -o yaml", kubectl, kubecfg)
-	out, err := runner.RunCmd(exec.Command("/bin/bash", "-c", get))
+	rest, err := runner.RunCmd(exec.Command("/bin/bash", "-c", get))
 	if err != nil {
 		klog.Errorf("failed to get current CoreDNS ConfigMap: %v", err)
 		return err
 	}
-	cm := strings.TrimSpace(out.Stdout.String())
+	cm := strings.TrimSpace(rest.Stdout.String())
 
 	// check if this specific host entry already exists in coredns configmap, so not to duplicate/override it
-	host := regexp.MustCompile(fmt.Sprintf(`(?smU)^ *hosts {.*%s.*}`, name))
-	if host.MatchString(cm) {
+	hostInfo := regexp.MustCompile(fmt.Sprintf(`(?smU)^ *hosts {.*%s.*}`, name))
+	if hostInfo.MatchString(cm) {
 		klog.Infof("CoreDNS already contains %q host record, skipping...", name)
 		return nil
 	}
@@ -956,8 +957,8 @@ func addCoreDNSEntry(runner command.Runner, name, ip string, cc config.ClusterCo
 	}
 
 	// check if logging is already enabled (via log plugin) in coredns configmap, so not to duplicate it
-	logs := regexp.MustCompile(`(?smU)^ *log *$`)
-	if !logs.MatchString(cm) {
+	regex := regexp.MustCompile(`(?smU)^ *log *$`)
+	if !regex.MatchString(cm) {
 		// inject log plugin into coredns configmap
 		sed = fmt.Sprintf("%s -e '/^ errors *$/i \\ log'", sed)
 	}
@@ -96,21 +96,21 @@ func maybePrintBetaUpdateText(betaReleasesURL string, localVersion semver.Versio
 	return true
 }
 
-func printUpdateTextCommon(version semver.Version) {
+func printUpdateTextCommon(ver semver.Version) {
 	if err := writeTimeToFile(lastUpdateCheckFilePath, time.Now().UTC()); err != nil {
 		klog.Errorf("write time failed: %v", err)
 	}
-	url := "https://github.com/kubernetes/minikube/releases/tag/v" + version.String()
-	out.Styled(style.Celebrate, `minikube {{.version}} is available! Download it: {{.url}}`, out.V{"version": version, "url": url})
+	url := "https://github.com/kubernetes/minikube/releases/tag/v" + ver.String()
+	out.Styled(style.Celebrate, `minikube {{.version}} is available! Download it: {{.url}}`, out.V{"version": ver, "url": url})
 }
 
-func printUpdateText(version semver.Version) {
-	printUpdateTextCommon(version)
+func printUpdateText(ver semver.Version) {
+	printUpdateTextCommon(ver)
 	out.Styled(style.Tip, "To disable this notice, run: 'minikube config set WantUpdateNotification false'\n")
 }
 
-func printBetaUpdateText(version semver.Version) {
-	printUpdateTextCommon(version)
+func printBetaUpdateText(ver semver.Version) {
+	printUpdateTextCommon(ver)
 	out.Styled(style.Tip, "To disable beta notices, run: 'minikube config set WantBetaUpdateNotification false'")
 	out.Styled(style.Tip, "To disable update notices in general, run: 'minikube config set WantUpdateNotification false'\n")
 }
@@ -248,14 +248,14 @@ func timeFromFileIfExists(path string) time.Time {
 }
 
 // DownloadURL returns a URL to get minikube binary version ver for platform os/arch
-func DownloadURL(ver, os, arch string) string {
-	if ver == "" || strings.HasSuffix(ver, "-unset") || os == "" || arch == "" {
+func DownloadURL(ver, osName, arch string) string {
+	if ver == "" || strings.HasSuffix(ver, "-unset") || osName == "" || arch == "" {
 		return "https://github.com/kubernetes/minikube/releases"
 	}
 	sfx := ""
-	if os == "windows" {
+	if osName == "windows" {
 		sfx = ".exe"
 	}
 	return fmt.Sprintf("https://github.com/kubernetes/minikube/releases/download/%s/minikube-%s-%s%s",
-		ver, os, arch, sfx)
+		ver, osName, arch, sfx)
 }
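
Renaming the os parameter to osName matters here because callers typically pass runtime.GOOS, and a parameter named os makes the os standard-library package unreachable inside the function body. A hypothetical call site (the notify import path and the version string are assumptions for illustration):

package main

import (
	"fmt"
	"runtime"

	"k8s.io/minikube/pkg/minikube/notify"
)

func main() {
	// runtime.GOOS/runtime.GOARCH, e.g. "linux"/"amd64" or "darwin"/"arm64",
	// are the natural arguments for the osName and arch parameters.
	url := notify.DownloadURL("v1.33.0", runtime.GOOS, runtime.GOARCH)
	fmt.Println(url)
}
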
@@ -107,8 +107,8 @@ func Styled(st style.Enum, format string, a ...V) {
 		Infof(format, a...)
 		return
 	}
-	outStyled, spinner := stylized(st, useColor, format, a...)
-	if spinner {
+	outStyled, useSpinner := stylized(st, useColor, format, a...)
+	if useSpinner {
 		spinnerString(outStyled)
 	} else {
 		String(outStyled)
@@ -116,12 +116,12 @@ func Styled(st style.Enum, format string, a ...V) {
 }
 
 func boxedCommon(printFunc func(format string, a ...interface{}), cfg box.Config, title string, format string, a ...V) {
-	box := box.New(cfg)
+	b := box.New(cfg)
 	if !useColor {
-		box.Config.Color = nil
+		b.Config.Color = nil
 	}
 	str := Sprintf(style.None, format, a...)
-	printFunc(box.String(title, strings.TrimSpace(str)))
+	printFunc(b.String(title, strings.TrimSpace(str)))
 }
 
 // Boxed writes a stylized and templated message in a box to stdout using the default style config
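
box := box.New(cfg) is the classic self-shadowing form: it compiles, because the right-hand side is resolved before the new name takes effect, but from the next statement on the box package is unreachable. A sketch with the standard library's bytes package standing in for box:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Legal: the RHS still sees the bytes package, because the new
	// variable is only in scope after this statement ends.
	bytes := bytes.NewBufferString("hello")

	// From here on, "bytes" is a *bytes.Buffer, so a second call such as
	// bytes.NewBufferString("again") would no longer compile.
	fmt.Println(bytes.String())
}
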
@@ -51,13 +51,13 @@ func timeCommandLogs(cmd *exec.Cmd) (*result, error) {
 	var timings []float64
 
 	for scanner.Scan() {
-		log := scanner.Text()
+		logData := scanner.Text()
 		// this is the time it took to complete the previous log
 		timeTaken := time.Since(timer).Seconds()
-		klog.Infof("%f: %s", timeTaken, log)
+		klog.Infof("%f: %s", timeTaken, logData)
 
 		timer = time.Now()
-		logs = append(logs, log)
+		logs = append(logs, logData)
 		timings = append(timings, timeTaken)
 	}
 	// add the time it took to get from the final log to finishing the command
@@ -63,12 +63,12 @@ func CompareMinikubeStart(ctx context.Context, binaries []*Binary) error {
 	return nil
 }
 
-func collectResults(ctx context.Context, binaries []*Binary, driver string, runtime string) (*resultManager, error) {
+func collectResults(ctx context.Context, binaries []*Binary, driver string, runtimeName string) (*resultManager, error) {
 	rm := newResultManager()
 	for run := 0; run < runs; run++ {
 		log.Printf("Executing run %d/%d...", run+1, runs)
 		for _, binary := range binaries {
-			r, err := timeMinikubeStart(ctx, binary, driver, runtime)
+			r, err := timeMinikubeStart(ctx, binary, driver, runtimeName)
 			if err != nil {
 				return nil, errors.Wrapf(err, "timing run %d with %s", run, binary.Name())
 			}
@@ -97,9 +97,9 @@ func average(nums []float64) float64 {
 	return total / float64(len(nums))
 }
 
-func downloadArtifacts(ctx context.Context, binaries []*Binary, driver string, runtime string) error {
+func downloadArtifacts(ctx context.Context, binaries []*Binary, driver string, runtimeName string) error {
 	for _, b := range binaries {
-		c := exec.CommandContext(ctx, b.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtime))
+		c := exec.CommandContext(ctx, b.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtimeName))
 		c.Stderr = os.Stderr
 		log.Printf("Running: %v...", c.Args)
 		if err := c.Run(); err != nil {
@@ -115,8 +115,8 @@ func downloadArtifacts(ctx context.Context, binaries []*Binary, driver string, r
 }
 
 // timeMinikubeStart returns the time it takes to execute `minikube start`
-func timeMinikubeStart(ctx context.Context, binary *Binary, driver string, runtime string) (*result, error) {
-	startCmd := exec.CommandContext(ctx, binary.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtime))
+func timeMinikubeStart(ctx context.Context, binary *Binary, driver string, runtimeName string) (*result, error) {
+	startCmd := exec.CommandContext(ctx, binary.path, "start", fmt.Sprintf("--driver=%s", driver), fmt.Sprintf("--container-runtime=%s", runtimeName))
 	startCmd.Stderr = os.Stderr
 
 	r, err := timeCommandLogs(startCmd)
@@ -147,6 +147,6 @@ func skipIngress(driver string) bool {
 // We only want to run the tests if:
 // 1. It's a VM driver and docker container runtime
 // 2. It's docker driver with any container runtime
-func proceed(driver string, runtime string) bool {
-	return runtime == "docker" || driver == "docker"
+func proceed(driver string, runtimeName string) bool {
+	return runtimeName == "docker" || driver == "docker"
 }
@@ -66,8 +66,8 @@ func init() {
 }
 
 // GetCoreClient returns a core client
-func (k *K8sClientGetter) GetCoreClient(context string) (typed_core.CoreV1Interface, error) {
-	client, err := kapi.Client(context)
+func (k *K8sClientGetter) GetCoreClient(ctx string) (typed_core.CoreV1Interface, error) {
+	client, err := kapi.Client(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "client")
 	}
@@ -288,8 +288,8 @@ func WaitForService(api libmachine.API, cname string, namespace string, service
 	}
 
 	for _, bareURLString := range serviceURL.URLs {
-		url, _ := OptionallyHTTPSFormattedURLString(bareURLString, https)
-		urlList = append(urlList, url)
+		urlString, _ := OptionallyHTTPSFormattedURLString(bareURLString, https)
+		urlList = append(urlList, urlString)
 	}
 	return urlList, nil
 }
@@ -314,7 +314,7 @@ func getServiceListFromServicesByLabel(services typed_core.ServiceInterface, key
 }
 
 // CreateSecret creates or modifies secrets
-func CreateSecret(cname string, namespace, name string, dataValues map[string]string, labels map[string]string) error {
+func CreateSecret(cname string, namespace, name string, dataValues map[string]string, labelData map[string]string) error {
 	client, err := K8s.GetCoreClient(cname)
 	if err != nil {
 		return &retry.RetriableError{Err: err}
@@ -344,7 +344,7 @@ func CreateSecret(cname string, namespace, name string, dataValues map[string]st
 	secretObj := &core.Secret{
 		ObjectMeta: meta.ObjectMeta{
 			Name:   name,
-			Labels: labels,
+			Labels: labelData,
 		},
 		Data: data,
 		Type: core.SecretTypeOpaque,
@@ -165,11 +165,11 @@ func Detect() (string, error) {
 }
 
 func (c EnvConfig) getShell() shellData {
-	shell, ok := shellConfigMap[c.Shell]
+	shellData, ok := shellConfigMap[c.Shell]
 	if !ok {
-		shell = defaultShell
+		shellData = defaultShell
 	}
-	return shell
+	return shellData
 }
 
 func generateUsageHint(ec EnvConfig, usgPlz, usgCmd string) string {
@@ -71,8 +71,8 @@ func SetDefaultStorageClass(storage storagev1.StorageV1Interface, name string) e
 }
 
 // GetStoragev1 return storage v1 interface for client
-func GetStoragev1(context string) (storagev1.StorageV1Interface, error) {
-	client, err := kapi.Client(context)
+func GetStoragev1(ctx string) (storagev1.StorageV1Interface, error) {
+	client, err := kapi.Client(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -137,10 +137,10 @@ func (api *MockAPI) Remove(name string) error {
 }
 
 // Save saves a host to disk.
-func (api *MockAPI) Save(host *host.Host) error {
+func (api *MockAPI) Save(hostInfo *host.Host) error {
 	api.SaveCalled = true
-	api.Logf("MockAPI.Save: %+v", host)
-	return api.FakeStore.Save(host)
+	api.Logf("MockAPI.Save: %+v", hostInfo)
+	return api.FakeStore.Save(hostInfo)
 }
 
 // GetMachinesDir returns the directory to store machines in.
@@ -80,10 +80,10 @@ func (m *clusterInspector) getStateAndRoute() (HostState, *Route, error) {
 	return hostState, route, nil
 }
 
-func getRoute(host *host.Host, clusterConfig config.ClusterConfig) (*Route, error) {
-	hostDriverIP, err := host.Driver.GetIP()
+func getRoute(hostInfo *host.Host, clusterConfig config.ClusterConfig) (*Route, error) {
+	hostDriverIP, err := hostInfo.Driver.GetIP()
 	if err != nil {
-		return nil, errors.Wrapf(err, "error getting host IP for %s", host.Name)
+		return nil, errors.Wrapf(err, "error getting host IP for %s", hostInfo.Name)
 	}
 
 	_, ipNet, err := net.ParseCIDR(clusterConfig.KubernetesConfig.ServiceCIDR)
@@ -28,7 +28,8 @@ import (
 	"k8s.io/klog/v2"
 )
 
-// ServiceTunnel ...
+// ServiceTunnel manages an SSH tunnel for a Kubernetes service.
+// It holds configuration for the SSH connection and the tunnel's state.
 type ServiceTunnel struct {
 	sshPort string
 	sshKey  string
@@ -37,7 +38,11 @@ type ServiceTunnel struct {
 	suppressStdOut bool
 }
 
-// NewServiceTunnel ...
+// NewServiceTunnel creates and returns a new ServiceTunnel instance.
+// sshPort is the port number for the SSH connection.
+// sshKey is the path to the SSH private key file.
+// v1Core is the Kubernetes CoreV1 client interface for interacting with services.
+// suppressStdOut controls whether standard output from the tunnel process should be suppressed.
 func NewServiceTunnel(sshPort, sshKey string, v1Core typed_core.CoreV1Interface, suppressStdOut bool) *ServiceTunnel {
 	return &ServiceTunnel{
 		sshPort: sshPort,
@@ -47,7 +52,12 @@ func NewServiceTunnel(sshPort, sshKey string, v1Core typed_core.CoreV1Interface,
 	}
 }
 
-// Start ...
+// Start establishes an SSH tunnel for the specified Kubernetes service.
+// It retrieves service details, creates an SSH connection with random local ports
+// for each service port, and starts the tunnel in a new goroutine.
+// It returns a slice of URLs (e.g., "http://127.0.0.1:local_port") corresponding
+// to the tunnelled ports, or an error if the setup fails.
+// Errors from the tunnel running in the background are logged via klog.
 func (t *ServiceTunnel) Start(svcName, namespace string) ([]string, error) {
 	svc, err := t.v1Core.Services(namespace).Get(context.Background(), svcName, metav1.GetOptions{})
 	if err != nil {
@@ -75,7 +85,8 @@ func (t *ServiceTunnel) Start(svcName, namespace string) ([]string, error) {
 	return urls, nil
 }
 
-// Stop ...
+// Stop attempts to gracefully stop the active SSH tunnel.
+// Any errors encountered during the stop process are logged as warnings.
 func (t *ServiceTunnel) Stop() {
 	err := t.sshConn.stop()
 	if err != nil {
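
The `// ServiceTunnel ...` comments replaced above are what gocritic's docStub check flags: a stub that silences the missing-doc-comment warning without documenting anything. The shape of the fix, as a sketch with hypothetical types:

package tunnel

// Dialer ...
type Dialer struct{} // docStub fires here: "..." silences the lint, documents nothing

// Forwarder forwards local connections to a remote endpoint over SSH and
// tracks the state needed to tear the forwarding down again. This is what
// docStub wants instead: a doc comment that actually documents.
type Forwarder struct{}
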
@@ -130,20 +130,20 @@ func createSSHConnWithRandomPorts(name, sshPort, sshKey string, svc *v1.Service)
 	usedPorts := make([]int, 0, len(svc.Spec.Ports))
 
 	for _, port := range svc.Spec.Ports {
-		freeport, err := freeport.GetFreePort()
+		freePort, err := freeport.GetFreePort()
 		if err != nil {
 			return nil, err
 		}
 
 		arg := fmt.Sprintf(
 			"-L %d:%s:%d",
-			freeport,
+			freePort,
 			svc.Spec.ClusterIP,
 			port.Port,
 		)
 
 		sshArgs = append(sshArgs, arg)
-		usedPorts = append(usedPorts, freeport)
+		usedPorts = append(usedPorts, freePort)
 	}
 
 	cmd := exec.Command("ssh", sshArgs...)
@@ -32,7 +32,9 @@ import (
 	"k8s.io/minikube/pkg/minikube/tunnel"
 )
 
-// SSHTunnel ...
+// SSHTunnel manages and reconciles SSH tunnels for Kubernetes Services
+// (specifically type LoadBalancer) and Ingress resources. It periodically
+// checks the cluster state and creates, maintains, or removes tunnels as needed.
 type SSHTunnel struct {
 	ctx     context.Context
 	sshPort string
@@ -45,7 +47,13 @@ type SSHTunnel struct {
 	connsToStop map[string]*sshConn
 }
 
-// NewSSHTunnel ...
+// NewSSHTunnel creates and returns a new SSHTunnel instance.
+// ctx is the context that controls the lifecycle of the tunnel manager.
+// sshPort is the port number of the SSH server to connect to.
+// sshKey is the path to the SSH private key file for authentication.
+// bindAddress is the local address on which the tunnels will listen.
+// v1Core is a Kubernetes CoreV1 client interface for interacting with Services.
+// v1Networking is a Kubernetes NetworkingV1 client interface for interacting with Ingresses.
 func NewSSHTunnel(ctx context.Context, sshPort, sshKey, bindAddress string, v1Core typed_core.CoreV1Interface, v1Networking typed_networking.NetworkingV1Interface) *SSHTunnel {
 	return &SSHTunnel{
 		ctx: ctx,
@@ -60,7 +68,12 @@ func NewSSHTunnel(ctx context.Context, sshPort, sshKey, bindAddress string, v1Co
 	}
 }
 
-// Start ...
+// Start begins the main reconciliation loop for the SSHTunnel.
+// This loop periodically scans for Kubernetes Services (type LoadBalancer)
+// and Ingresses, creating or tearing down SSH tunnels as necessary.
+// This method blocks until the provided context (t.ctx) is canceled.
+// It returns any error associated with context cancellation or initial setup.
+// Runtime errors during the tunnel management loop are logged via klog.
 func (t *SSHTunnel) Start() error {
 	for {
 		select {
@@ -91,17 +91,18 @@ func (r *persistentRegistry) Register(tunnel *ID) (rerr error) {
 		// tunnels simultaneously. It is possible that an old tunnel
 		// from an old profile has duplicated route information so we
 		// need to check both machine name and route information.
-		if tunnel.MachineName == t.MachineName && t.Route.Equal(tunnel.Route) {
-			isRunning, err := checkIfRunning(t.Pid)
-			if err != nil {
-				return fmt.Errorf("error checking whether conflicting tunnel (%v) is running: %s", t, err)
-			}
-			if isRunning {
-				return errorTunnelAlreadyExists(t)
-			}
-			tunnels[i] = tunnel
-			alreadyExists = true
+		if tunnel.MachineName != t.MachineName || !tunnel.Route.Equal(t.Route) {
+			continue
 		}
+		isRunning, err := checkIfRunning(t.Pid)
+		if err != nil {
+			return fmt.Errorf("error checking whether conflicting tunnel (%v) is running: %s", t, err)
+		}
+		if isRunning {
+			return errorTunnelAlreadyExists(t)
+		}
+		tunnels[i] = tunnel
+		alreadyExists = true
 	}
 
 	if !alreadyExists {
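
The Register rewrite is the nestingReduce recipe from gocritic: invert the condition (note the De Morgan flip from == && to != ||), continue early, and move the old body out one nesting level. In miniature:

package main

import "fmt"

func main() {
	nums := []int{1, 2, 3, 4, 5, 6}

	// before: the interesting work is buried one level deep
	for _, n := range nums {
		if n%2 == 0 {
			fmt.Println("even:", n)
		}
	}

	// after: invert the condition and continue, so the body stays flat
	for _, n := range nums {
		if n%2 != 0 {
			continue
		}
		fmt.Println("even:", n)
	}
}
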
@@ -68,7 +68,7 @@ func (r *simpleReporter) Report(tunnelState *Status) {
 	loadbalancer emulator: %s
 `, minikubeError, routerError, lbError)
 
-	_, err := r.out.Write([]byte(fmt.Sprintf(
+	_, err := fmt.Fprintf(r.out,
 		`Status:
 	machine: %s
 	pid: %d
@@ -80,7 +80,7 @@ func (r *simpleReporter) Report(tunnelState *Status) {
 		tunnelState.TunnelID.Route,
 		minikubeState,
 		managedServices,
-		errors)))
+		errors)
 	if err != nil {
 		klog.Errorf("failed to report state %s", err)
 	}
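
This Report change is staticcheck's QF1012: x.Write([]byte(fmt.Sprintf(...))) formats into a throwaway string and byte slice before writing, while fmt.Fprintf(x, ...) formats straight into the io.Writer. Side by side:

package main

import (
	"fmt"
	"os"
)

func main() {
	status, pid := "Running", 1234

	// QF1012: builds the whole string, copies it to a []byte, then writes.
	_, _ = os.Stdout.Write([]byte(fmt.Sprintf("machine: %s pid: %d\n", status, pid)))

	// Preferred: Fprintf formats directly into the writer.
	_, _ = fmt.Fprintf(os.Stdout, "machine: %s pid: %d\n", status, pid)
}
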
@@ -105,7 +105,7 @@ func (t *routingTable) Equal(other *routingTable) bool {
 	for i := range *t {
 		routesEqual := (*t)[i].route.Equal((*other)[i].route)
 		linesEqual := (*t)[i].line == ((*other)[i].line)
-		if !(routesEqual && linesEqual) {
+		if !routesEqual || !linesEqual {
 			return false
 		}
 	}
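
And the routingTable change is staticcheck's QF1001, De Morgan's law: !(a && b) is equivalent to !a || !b, which reads as "either one differs" rather than "not both equal". A quick truth-table check:

package main

import "fmt"

func main() {
	// De Morgan: !(a && b) == (!a || !b) holds for every combination.
	for _, a := range []bool{false, true} {
		for _, b := range []bool{false, true} {
			fmt.Println(a, b, !(a && b) == (!a || !b)) // prints true in all four cases
		}
	}
}
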
@@ -57,14 +57,14 @@ var _ controller.Provisioner = &hostPathProvisioner{}
 
 // Provision creates a storage asset and returns a PV object representing it.
 func (p *hostPathProvisioner) Provision(_ context.Context, options controller.ProvisionOptions) (*core.PersistentVolume, controller.ProvisioningState, error) {
-	path := path.Join(p.pvDir, options.PVC.Namespace, options.PVC.Name)
-	klog.Infof("Provisioning volume %v to %s", options, path)
-	if err := os.MkdirAll(path, 0777); err != nil {
+	hostPath := path.Join(p.pvDir, options.PVC.Namespace, options.PVC.Name)
+	klog.Infof("Provisioning volume %v to %s", options, hostPath)
+	if err := os.MkdirAll(hostPath, 0777); err != nil {
 		return nil, controller.ProvisioningFinished, err
 	}
 
 	// Explicitly chmod created dir, so we know mode is set to 0777 regardless of umask
-	if err := os.Chmod(path, 0777); err != nil {
+	if err := os.Chmod(hostPath, 0777); err != nil {
 		return nil, controller.ProvisioningFinished, err
 	}
 
@@ -83,7 +83,7 @@ func (p *hostPathProvisioner) Provision(_ context.Context, options controller.Pr
 			},
 			PersistentVolumeSource: core.PersistentVolumeSource{
 				HostPath: &core.HostPathVolumeSource{
-					Path: path,
+					Path: hostPath,
 				},
 			},
 		},