Merge commit f277d55e4a

@@ -127,7 +127,7 @@ jobs:
 chmod a+x e2e-*
 chmod a+x minikube-*
 START_TIME=$(date -u +%s)
-KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
+KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--vm-driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
 END_TIME=$(date -u +%s)
 TIME_ELAPSED=$(($END_TIME-$START_TIME))
 min=$((${TIME_ELAPSED}/60))
@@ -216,7 +216,7 @@ jobs:
 chmod a+x e2e-*
 chmod a+x minikube-*
 START_TIME=$(date -u +%s)
-KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
+KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome ./e2e-linux-amd64 -minikube-start-args=--driver=docker -test.timeout=80m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
 END_TIME=$(date -u +%s)
 TIME_ELAPSED=$(($END_TIME-$START_TIME))
 min=$((${TIME_ELAPSED}/60))
@@ -297,7 +297,7 @@ jobs:
 chmod a+x e2e-*
 chmod a+x minikube-*
 START_TIME=$(date -u +%s)
-KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=50m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
+KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
 END_TIME=$(date -u +%s)
 TIME_ELAPSED=$(($END_TIME-$START_TIME))
 min=$((${TIME_ELAPSED}/60))
@@ -378,7 +378,7 @@ jobs:
 chmod a+x e2e-*
 chmod a+x minikube-*
 START_TIME=$(date -u +%s)
-KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=50m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
+KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=none -test.timeout=35m -test.v -timeout-multiplier=1.5 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
 END_TIME=$(date -u +%s)
 TIME_ELAPSED=$(($END_TIME-$START_TIME))
 min=$((${TIME_ELAPSED}/60))
@@ -459,7 +459,7 @@ jobs:
 chmod a+x e2e-*
 chmod a+x minikube-*
 START_TIME=$(date -u +%s)
-KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=70m -test.v -timeout-multiplier=1.3 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
+KUBECONFIG=$(pwd)/testhome/kubeconfig MINIKUBE_HOME=$(pwd)/testhome sudo -E ./e2e-linux-amd64 -minikube-start-args=--driver=podman -test.timeout=30m -test.v -timeout-multiplier=1 -binary=./minikube-linux-amd64 2>&1 | tee ./report/testout.txt
 END_TIME=$(date -u +%s)
 TIME_ELAPSED=$(($END_TIME-$START_TIME))
 min=$((${TIME_ELAPSED}/60))
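The only change in each of these five jobs is the e2e invocation: the Docker jobs raise -timeout-multiplier from 1.3 to 1.5, the none-driver jobs tighten -test.timeout from 50m to 35m, and the podman job drops to -test.timeout=30m with -timeout-multiplier=1. The multiplier scales per-test deadlines inside the suite; here is a minimal Go sketch of how such a flag is typically applied (the helper below is an illustration, not minikube's actual test harness):

```go
package main

import (
	"flag"
	"fmt"
	"time"
)

// timeoutMultiplier mirrors the e2e suite's -timeout-multiplier flag
// (hypothetical wiring; the real flag lives in minikube's test harness).
var timeoutMultiplier = flag.Float64("timeout-multiplier", 1.0, "scale all test timeouts")

// Minutes returns n minutes scaled by the multiplier, so a 10-minute
// deadline becomes 15 minutes when -timeout-multiplier=1.5.
func Minutes(n int) time.Duration {
	return time.Duration(*timeoutMultiplier * float64(n) * float64(time.Minute))
}

func main() {
	flag.Parse()
	fmt.Println("scaled deadline:", Minutes(10))
}
```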
CHANGELOG.md
@@ -1,6 +1,67 @@
 # Release Notes
 
-## Version 1.9.0- 2020-03-26
+## Version 1.9.2 - 2020-04-04
+
+Minor improvements:
+
+* UX: Remove noisy debug statement [#7407](https://github.com/kubernetes/minikube/pull/7407)
+* Feature: Make --wait more flexible [#7375](https://github.com/kubernetes/minikube/pull/7375)
+* Docker: adjust warn if slow for ps and volume [#7410](https://github.com/kubernetes/minikube/pull/7410)
+* Localization: Update Japanese translations [#7403](https://github.com/kubernetes/minikube/pull/7403)
+* Performance: Parallelize updating cluster and setting up certs [#7394](https://github.com/kubernetes/minikube/pull/7394)
+* Addons: allow ingress addon for docker/podman drivers only on linux for now [#7393](https://github.com/kubernetes/minikube/pull/7393)
+
+- Anders F Björklund
+- Medya Ghazizadeh
+- Prasad Katti
+- Priya Wadhwa
+- Thomas Strömberg
+- tomocy
+
+## Version 1.9.1 - 2020-04-02
+
+Improvements:
+
+* add delete-on-failure flag [#7345](https://github.com/kubernetes/minikube/pull/7345)
+* Run dashboard with internal kubectl if not in path [#7299](https://github.com/kubernetes/minikube/pull/7299)
+* Implement options for the minikube version command [#7325](https://github.com/kubernetes/minikube/pull/7325)
+* service list cmd: display target port and name [#6879](https://github.com/kubernetes/minikube/pull/6879)
+* Add rejection reason to 'unable to find driver' error [#7379](https://github.com/kubernetes/minikube/pull/7379)
+* Update Japanese translations [#7359](https://github.com/kubernetes/minikube/pull/7359)
+
+Bug fixes:
+
+* Make eviction and image GC settings consistent across kubeadm API versions [#7364](https://github.com/kubernetes/minikube/pull/7364)
+* Move errors and warnings to output to stderr [#7382](https://github.com/kubernetes/minikube/pull/7382)
+* Correct assumptions for forwarded hostname & IP handling [#7360](https://github.com/kubernetes/minikube/pull/7360)
+* Extend maximum stop retry from 30s to 120s [#7363](https://github.com/kubernetes/minikube/pull/7363)
+* Use kubectl version --short if --output=json fails [#7356](https://github.com/kubernetes/minikube/pull/7356)
+* Fix embed certs by updating kubeconfig after certs are populated [#7309](https://github.com/kubernetes/minikube/pull/7309)
+* none: Use LookPath to verify conntrack install [#7305](https://github.com/kubernetes/minikube/pull/7305)
+* Show all global flags in options command [#7292](https://github.com/kubernetes/minikube/pull/7292)
+* Fix null deref in start host err [#7278](https://github.com/kubernetes/minikube/pull/7278)
+* Increase Docker "slow" timeouts to 15s [#7268](https://github.com/kubernetes/minikube/pull/7268)
+* none: check for docker and root uid [#7388](https://github.com/kubernetes/minikube/pull/7388)
+
+Thank you to our contributors for this release!
+
+- Anders F Björklund
+- Dan Lorenc
+- Eberhard Wolff
+- John Laswell
+- Marcin Niemira
+- Medya Ghazizadeh
+- Prasad Katti
+- Priya Wadhwa
+- Sharif Elgamal
+- Thomas Strömberg
+- Vincent Link
+- anencore94
+- priyawadhwa
+- re;i
+- tomocy
+
+## Version 1.9.0 - 2020-03-26
 
 New features & improvements
 
Makefile
@@ -15,7 +15,7 @@
 # Bump these on release - and please check ISO_VERSION for correctness.
 VERSION_MAJOR ?= 1
 VERSION_MINOR ?= 9
-VERSION_BUILD ?= 0
+VERSION_BUILD ?= 2
 RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
 VERSION ?= v$(RAW_VERSION)
 
@@ -23,7 +23,7 @@ KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/co
 KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
 
-ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
+# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
+ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0
 # Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
 DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
 RPM_VERSION ?= $(DEB_VERSION)
@@ -23,6 +23,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/kubeconfig"
+	"k8s.io/minikube/pkg/minikube/mustload"
 	"k8s.io/minikube/pkg/minikube/out"
 )
@@ -48,8 +49,7 @@ var ProfileCmd = &cobra.Command{
 		name is in the list of reserved keywords
 		*/
 		if config.ProfileNameInReservedKeywords(profile) {
-			out.ErrT(out.FailureType, `Profile name "{{.profilename}}" is minikube keyword. To delete profile use command minikube delete -p <profile name> `, out.V{"profilename": profile})
-			os.Exit(0)
+			exit.WithCodeT(exit.Config, `Profile name "{{.profilename}}" is reserved keyword. To delete this profile, run: "{{.cmd}}"`, out.V{"profilename": profile, "cmd": mustload.ExampleCmd(profile, "delete")})
 		}
 
 		if profile == "default" {
@@ -91,13 +91,13 @@ var printProfilesTable = func() {
 	table.Render()
 
 	if invalidProfiles != nil {
-		out.T(out.Warning, "Found {{.number}} invalid profile(s) ! ", out.V{"number": len(invalidProfiles)})
+		out.WarningT("Found {{.number}} invalid profile(s) ! ", out.V{"number": len(invalidProfiles)})
 		for _, p := range invalidProfiles {
-			out.T(out.Empty, "\t "+p.Name)
+			out.ErrT(out.Empty, "\t "+p.Name)
 		}
-		out.T(out.Tip, "You can delete them using the following command(s): ")
+		out.ErrT(out.Tip, "You can delete them using the following command(s): ")
 		for _, p := range invalidProfiles {
-			out.String(fmt.Sprintf("\t $ minikube delete -p %s \n", p.Name))
+			out.Err(fmt.Sprintf("\t $ minikube delete -p %s \n", p.Name))
 		}
 	}
@@ -40,7 +40,7 @@ func IsValidDriver(string, name string) error {
 
 // RequiresRestartMsg returns the "requires restart" message
 func RequiresRestartMsg(string, string) error {
-	out.T(out.Warning, "These changes will take effect upon a minikube delete and then a minikube start")
+	out.WarningT("These changes will take effect upon a minikube delete and then a minikube start")
 	return nil
 }
@@ -63,10 +63,8 @@ var dashboardCmd = &cobra.Command{
 			}
 		}
 
-		kubectl, err := exec.LookPath("kubectl")
-		if err != nil {
-			exit.WithCodeT(exit.NoInput, "kubectl not found in PATH, but is required for the dashboard. Installation guide: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
-		}
+		kubectlVersion := co.Config.KubernetesConfig.KubernetesVersion
+		var err error
 
 		// Check dashboard status before enabling it
 		dashboardAddon := assets.Addons["dashboard"]
@@ -90,7 +88,7 @@ var dashboardCmd = &cobra.Command{
 		}
 
 		out.ErrT(out.Launch, "Launching proxy ...")
-		p, hostPort, err := kubectlProxy(kubectl, cname)
+		p, hostPort, err := kubectlProxy(kubectlVersion, cname)
 		if err != nil {
 			exit.WithError("kubectl proxy", err)
 		}
@@ -124,10 +122,17 @@ var dashboardCmd = &cobra.Command{
 }
 
 // kubectlProxy runs "kubectl proxy", returning host:port
-func kubectlProxy(path string, contextName string) (*exec.Cmd, string, error) {
+func kubectlProxy(kubectlVersion string, contextName string) (*exec.Cmd, string, error) {
 	// port=0 picks a random system port
 
-	cmd := exec.Command(path, "--context", contextName, "proxy", "--port=0")
+	kubectlArgs := []string{"--context", contextName, "proxy", "--port=0"}
+
+	var cmd *exec.Cmd
+	if kubectl, err := exec.LookPath("kubectl"); err == nil {
+		cmd = exec.Command(kubectl, kubectlArgs...)
+	} else if cmd, err = KubectlCommand(kubectlVersion, kubectlArgs...); err != nil {
+		return nil, "", err
+	}
 
 	stdoutPipe, err := cmd.StdoutPipe()
 	if err != nil {
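The rewritten kubectlProxy above no longer hard-exits when kubectl is absent from PATH: it prefers the system binary and falls back to a cached, version-matched one via KubectlCommand (added later in this diff). A standalone sketch of that lookup-with-fallback shape, where cachedKubectl is a hypothetical stand-in for KubectlCommand:

```go
package main

import (
	"fmt"
	"os/exec"
)

// cachedKubectl stands in for KubectlCommand, which resolves a kubectl
// binary matching the cluster's Kubernetes version (assumed for this sketch).
func cachedKubectl(version string, args ...string) (*exec.Cmd, error) {
	return exec.Command("/path/to/cached/kubectl", args...), nil
}

// proxyCommand prefers kubectl from PATH, falling back to the cached binary.
func proxyCommand(version, contextName string) (*exec.Cmd, error) {
	args := []string{"--context", contextName, "proxy", "--port=0"}
	if kubectl, err := exec.LookPath("kubectl"); err == nil {
		return exec.Command(kubectl, args...), nil
	}
	return cachedKubectl(version, args...)
}

func main() {
	cmd, err := proxyCommand("v1.18.0", "minikube")
	if err != nil {
		fmt.Println("no kubectl available:", err)
		return
	}
	fmt.Println("would run:", cmd.Args)
}
```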
@@ -236,7 +236,7 @@ func deleteProfile(profile *config.Profile) error {
 	}
 
 	if err := killMountProcess(); err != nil {
-		out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err})
+		out.FailureT("Failed to kill mount process: {{.error}}", out.V{"error": err})
 	}
 
 	deleteHosts(api, cc)
@@ -264,7 +264,7 @@ func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
 		case mcnerror.ErrHostDoesNotExist:
 			glog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
 		default:
-			out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
+			out.FailureT("Failed to delete cluster: {{.error}}", out.V{"error": err})
 			out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": machineName})
 		}
 	}
@@ -129,7 +129,7 @@ var dockerEnvCmd = &cobra.Command{
 	Run: func(cmd *cobra.Command, args []string) {
 		cname := ClusterFlagValue()
 		co := mustload.Running(cname)
-		driverName := co.CPHost.DriverName
+		driverName := co.CP.Host.DriverName
 
 		if driverName == driver.None {
 			exit.UsageT(`'none' driver does not support 'minikube docker-env' command`)
@@ -140,7 +140,7 @@ var dockerEnvCmd = &cobra.Command{
 				out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime})
 		}
 
-		if ok := isDockerActive(co.CPRunner); !ok {
+		if ok := isDockerActive(co.CP.Runner); !ok {
 			exit.WithCodeT(exit.Unavailable, `The docker service within '{{.name}}' is not active`, out.V{"name": cname})
 		}
 
@@ -150,7 +150,7 @@ var dockerEnvCmd = &cobra.Command{
 
 		var err error
 		port := constants.DockerDaemonPort
-		if driver.IsKIC(driverName) {
+		if driver.NeedsPortForward(driverName) {
 			port, err = oci.ForwardedPort(driverName, cname, port)
 			if err != nil {
 				exit.WithCodeT(exit.Failure, "Error getting port binding for '{{.driver_name}} driver: {{.error}}", out.V{"driver_name": driverName, "error": err})
@@ -161,7 +161,7 @@ var dockerEnvCmd = &cobra.Command{
 			EnvConfig: sh,
 			profile:   cname,
 			driver:    driverName,
-			hostIP:    co.DriverIP.String(),
+			hostIP:    co.CP.IP.String(),
 			port:      port,
 			certsDir:  localpath.MakeMiniPath("certs"),
 			noProxy:   noProxy,
@@ -29,6 +29,6 @@ var ipCmd = &cobra.Command{
 	Long: `Retrieves the IP address of the running cluster, and writes it to STDOUT.`,
 	Run: func(cmd *cobra.Command, args []string) {
 		co := mustload.Running(ClusterFlagValue())
-		out.Ln(co.DriverIP.String())
+		out.Ln(co.CP.IP.String())
 	},
 }
@@ -43,17 +43,12 @@ minikube kubectl -- get pods --namespace kube-system`,
 		co := mustload.Healthy(ClusterFlagValue())
 
 		version := co.Config.KubernetesConfig.KubernetesVersion
-		if version == "" {
-			version = constants.DefaultKubernetesVersion
-		}
-
-		path, err := node.CacheKubectlBinary(version)
+		c, err := KubectlCommand(version, args...)
 		if err != nil {
 			out.ErrLn("Error caching kubectl: %v", err)
 		}
 
-		glog.Infof("Running %s %v", path, args)
-		c := exec.Command(path, args...)
 		c.Stdin = os.Stdin
 		c.Stdout = os.Stdout
 		c.Stderr = os.Stderr
@@ -70,3 +65,17 @@ minikube kubectl -- get pods --namespace kube-system`,
 		}
 	},
 }
+
+// KubectlCommand will return kubectl command with a version matching the cluster
+func KubectlCommand(version string, args ...string) (*exec.Cmd, error) {
+	if version == "" {
+		version = constants.DefaultKubernetesVersion
+	}
+
+	path, err := node.CacheKubectlBinary(version)
+	if err != nil {
+		return nil, err
+	}
+
+	return exec.Command(path, args...), nil
+}
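With KubectlCommand in place, `minikube kubectl -- <args>` reduces to resolving a binary and passing the caller's stdio through to it. A self-contained sketch of that delegation, using whatever kubectl is on PATH rather than minikube's cached copy:

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Delegate to a kubectl binary, passing through stdio; this is the
	// same shape as `minikube kubectl -- get pods --namespace kube-system`.
	c := exec.Command("kubectl", "get", "pods", "--namespace", "kube-system")
	c.Stdin = os.Stdin
	c.Stdout = os.Stdout
	c.Stderr = os.Stderr
	if err := c.Run(); err != nil {
		log.Fatalf("kubectl failed: %v", err)
	}
}
```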
@@ -53,32 +53,32 @@ var logsCmd = &cobra.Command{
 	Run: func(cmd *cobra.Command, args []string) {
 		co := mustload.Running(ClusterFlagValue())
 
-		bs, err := cluster.Bootstrapper(co.API, viper.GetString(cmdcfg.Bootstrapper), *co.Config, *co.CPNode)
+		bs, err := cluster.Bootstrapper(co.API, viper.GetString(cmdcfg.Bootstrapper), *co.Config, *co.CP.Node)
 		if err != nil {
 			exit.WithError("Error getting cluster bootstrapper", err)
 		}
 
-		cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: co.CPRunner})
+		cr, err := cruntime.New(cruntime.Config{Type: co.Config.KubernetesConfig.ContainerRuntime, Runner: co.CP.Runner})
 		if err != nil {
 			exit.WithError("Unable to get runtime", err)
 		}
 		if followLogs {
-			err := logs.Follow(cr, bs, *co.Config, co.CPRunner)
+			err := logs.Follow(cr, bs, *co.Config, co.CP.Runner)
 			if err != nil {
 				exit.WithError("Follow", err)
 			}
 			return
 		}
 		if showProblems {
-			problems := logs.FindProblems(cr, bs, *co.Config, co.CPRunner)
+			problems := logs.FindProblems(cr, bs, *co.Config, co.CP.Runner)
 			logs.OutputProblems(problems, numberOfProblems)
 			return
 		}
-		err = logs.Output(cr, bs, *co.Config, co.CPRunner, numberOfLines)
+		err = logs.Output(cr, bs, *co.Config, co.CP.Runner, numberOfLines)
 		if err != nil {
 			out.Ln("")
 			// Avoid exit.WithError, since it outputs the issue URL
-			out.T(out.Warning, "{{.error}}", out.V{"error": err})
+			out.WarningT("{{.error}}", out.V{"error": err})
 			os.Exit(exit.Unavailable)
 		}
 	},
@@ -99,14 +99,14 @@ var mountCmd = &cobra.Command{
 		}
 
 		co := mustload.Running(ClusterFlagValue())
-		if co.CPHost.Driver.DriverName() == driver.None {
+		if co.CP.Host.Driver.DriverName() == driver.None {
 			exit.UsageT(`'none' driver does not support 'minikube mount' command`)
 		}
 
 		var ip net.IP
 		var err error
 		if mountIP == "" {
-			ip, err = cluster.GetVMHostIP(co.CPHost)
+			ip, err = cluster.GetVMHostIP(co.CP.Host)
 			if err != nil {
 				exit.WithError("Error getting the host IP address to use from within the VM", err)
 			}
@@ -143,11 +143,11 @@ var mountCmd = &cobra.Command{
 
 		// An escape valve to allow future hackers to try NFS, VirtFS, or other FS types.
 		if !supportedFilesystems[cfg.Type] {
-			out.T(out.Warning, "{{.type}} is not yet a supported filesystem. We will try anyways!", out.V{"type": cfg.Type})
+			out.WarningT("{{.type}} is not yet a supported filesystem. We will try anyways!", out.V{"type": cfg.Type})
 		}
 
 		bindIP := ip.String() // the ip to listen on the user's host machine
-		if driver.IsKIC(co.CPHost.Driver.DriverName()) && runtime.GOOS != "linux" {
+		if driver.IsKIC(co.CP.Host.Driver.DriverName()) && runtime.GOOS != "linux" {
 			bindIP = "127.0.0.1"
 		}
 		out.T(out.Mounting, "Mounting host path {{.sourcePath}} into VM as {{.destinationPath}} ...", out.V{"sourcePath": hostPath, "destinationPath": vmPath})
@@ -177,15 +177,15 @@ var mountCmd = &cobra.Command{
 		go func() {
 			for sig := range c {
 				out.T(out.Unmount, "Unmounting {{.path}} ...", out.V{"path": vmPath})
-				err := cluster.Unmount(co.CPRunner, vmPath)
+				err := cluster.Unmount(co.CP.Runner, vmPath)
 				if err != nil {
-					out.ErrT(out.FailureType, "Failed unmount: {{.error}}", out.V{"error": err})
+					out.FailureT("Failed unmount: {{.error}}", out.V{"error": err})
 				}
 				exit.WithCodeT(exit.Interrupted, "Received {{.name}} signal", out.V{"name": sig})
 			}
 		}()
 
-		err = cluster.Mount(co.CPRunner, ip.String(), vmPath, cfg)
+		err = cluster.Mount(co.CP.Runner, ip.String(), vmPath, cfg)
 		if err != nil {
 			exit.WithError("mount failed", err)
 		}
@@ -18,10 +18,8 @@ package cmd
 
 import (
 	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
-	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/mustload"
 	"k8s.io/minikube/pkg/minikube/node"
 	"k8s.io/minikube/pkg/minikube/out"
@@ -40,7 +38,7 @@ var nodeAddCmd = &cobra.Command{
 		cc := co.Config
 
 		if driver.BareMetal(cc.Driver) {
-			out.ErrT(out.FailureType, "none driver does not support multi-node clusters")
+			out.FailureT("none driver does not support multi-node clusters")
 		}
 
 		name := node.Name(len(cc.Nodes) + 1)
@@ -56,7 +54,7 @@ var nodeAddCmd = &cobra.Command{
 		}
 
 		if err := node.Add(cc, n); err != nil {
-			exit.WithError("Error adding node to cluster", err)
+			maybeDeleteAndRetry(*cc, n, nil, err)
 		}
 
 		out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name})
@@ -64,13 +62,10 @@ var nodeAddCmd = &cobra.Command{
 }
 
 func init() {
+	// TODO(https://github.com/kubernetes/minikube/issues/7366): We should figure out which minikube start flags to actually import
 	nodeAddCmd.Flags().BoolVar(&cp, "control-plane", false, "If true, the node added will also be a control plane in addition to a worker.")
 	nodeAddCmd.Flags().BoolVar(&worker, "worker", true, "If true, the added node will be marked for work. Defaults to true.")
-	//We should figure out which of these flags to actually import
-	startCmd.Flags().Visit(
-		func(f *pflag.Flag) {
-			nodeAddCmd.Flags().AddFlag(f)
-		},
-	)
+	nodeAddCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
 
 	nodeCmd.AddCommand(nodeAddCmd)
 }
@@ -49,12 +49,15 @@ var nodeStartCmd = &cobra.Command{
 			exit.WithError("retrieving node", err)
 		}
 
-		// Start it up baby
-		node.Start(*cc, *n, nil, false)
+		_, err = node.Start(*cc, *n, nil, false)
+		if err != nil {
+			maybeDeleteAndRetry(*cc, *n, nil, err)
+		}
 	},
 }
 
 func init() {
 	nodeStartCmd.Flags().String("name", "", "The name of the node to start")
+	nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
 	nodeCmd.AddCommand(nodeStartCmd)
 }
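Both node add and node start now route failures through maybeDeleteAndRetry (defined later in this diff) instead of exiting on the first error. Stripped of minikube specifics, the control flow is a small wrapper; start and deleteCluster below are placeholders, not minikube's API:

```go
package main

import (
	"errors"
	"fmt"
)

// startWithRetry shows the delete-on-failure shape: if the first start
// fails and the flag is set, tear the cluster down and try once more.
func startWithRetry(deleteOnFailure bool, start, deleteCluster func() error) error {
	err := start()
	if err == nil || !deleteOnFailure {
		return err
	}
	fmt.Println("start failed, deleting and trying again:", err)
	if derr := deleteCluster(); derr != nil {
		fmt.Println("failed to delete cluster, proceeding with retry anyway:", derr)
	}
	return start()
}

func main() {
	attempts := 0
	err := startWithRetry(true,
		func() error { // fails once, then succeeds
			attempts++
			if attempts == 1 {
				return errors.New("provisioning error")
			}
			return nil
		},
		func() error { return nil },
	)
	fmt.Println("final result:", err, "attempts:", attempts)
}
```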
@ -108,17 +108,17 @@ var podmanEnvCmd = &cobra.Command{
|
|||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cname := ClusterFlagValue()
|
||||
co := mustload.Running(cname)
|
||||
driverName := co.CPHost.DriverName
|
||||
driverName := co.CP.Host.DriverName
|
||||
|
||||
if driverName == driver.None {
|
||||
exit.UsageT(`'none' driver does not support 'minikube podman-env' command`)
|
||||
}
|
||||
|
||||
if ok := isPodmanAvailable(co.CPRunner); !ok {
|
||||
if ok := isPodmanAvailable(co.CP.Runner); !ok {
|
||||
exit.WithCodeT(exit.Unavailable, `The podman service within '{{.cluster}}' is not active`, out.V{"cluster": cname})
|
||||
}
|
||||
|
||||
client, err := createExternalSSHClient(co.CPHost.Driver)
|
||||
client, err := createExternalSSHClient(co.CP.Host.Driver)
|
||||
if err != nil {
|
||||
exit.WithError("Error getting ssh client", err)
|
||||
}
|
||||
|
|
|
@@ -137,7 +137,7 @@ func startKicServiceTunnel(svc, configName string) {
 	service.PrintServiceList(os.Stdout, data)
 
 	openURLs(svc, urls)
-	out.T(out.Warning, "Because you are using docker driver on Mac, the terminal needs to be open to run it.")
+	out.WarningT("Because you are using docker driver on Mac, the terminal needs to be open to run it.")
 
 	<-ctrlC
 
@@ -43,14 +43,14 @@ var sshCmd = &cobra.Command{
 	Run: func(cmd *cobra.Command, args []string) {
 		cname := ClusterFlagValue()
 		co := mustload.Running(cname)
-		if co.CPHost.DriverName == driver.None {
+		if co.CP.Host.DriverName == driver.None {
 			exit.UsageT("'none' driver does not support 'minikube ssh' command")
 		}
 
 		var err error
 		var n *config.Node
 		if nodeName == "" {
-			n = co.CPNode
+			n = co.CP.Node
 		} else {
 			n, _, err = node.Retrieve(co.Config, nodeName)
 			if err != nil {
@@ -44,6 +44,7 @@ import (
 	cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
 	"k8s.io/minikube/pkg/drivers/kic/oci"
 	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
+	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
 	"k8s.io/minikube/pkg/minikube/bootstrapper/images"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/constants"
@@ -109,7 +110,7 @@ const (
 	downloadOnly    = "download-only"
 	dnsProxy        = "dns-proxy"
 	hostDNSResolver = "host-dns-resolver"
-	waitUntilHealthy = "wait"
+	waitComponents  = "wait"
 	force           = "force"
 	dryRun          = "dry-run"
 	interactive     = "interactive"
@@ -124,6 +125,7 @@ const (
 	natNicType      = "nat-nic-type"
 	nodes           = "nodes"
 	preload         = "preload"
+	deleteOnFailure = "delete-on-failure"
 )
 
 var (
@@ -170,13 +172,14 @@ func initMinikubeFlags() {
 	startCmd.Flags().String(criSocket, "", "The cri socket path to be used.")
 	startCmd.Flags().String(networkPlugin, "", "The name of the network plugin.")
 	startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\".")
-	startCmd.Flags().Bool(waitUntilHealthy, true, "Block until the apiserver is servicing API requests")
+	startCmd.Flags().StringSlice(waitComponents, kverify.DefaultWaitList, fmt.Sprintf("comma separated list of kubernetes components to verify and wait for after starting a cluster. defaults to %q, available options: %q . other acceptable values are 'all' or 'none', 'true' and 'false'", strings.Join(kverify.DefaultWaitList, ","), strings.Join(kverify.AllComponentsList, ",")))
 	startCmd.Flags().Duration(waitTimeout, 6*time.Minute, "max time to wait per Kubernetes core services to be healthy.")
 	startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
 	startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.")
 	startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.")
 	startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.")
 	startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.")
+	startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
 }
 
 // initKubernetesFlags inits the commandline flags for kubernetes related options
@@ -190,7 +193,7 @@ func initKubernetesFlags() {
 	startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
 	startCmd.Flags().String(dnsDomain, constants.ClusterDNSDomain, "The cluster dns domain name used in the kubernetes cluster")
 	startCmd.Flags().Int(apiServerPort, constants.APIServerPort, "The apiserver listening port")
-	startCmd.Flags().String(apiServerName, constants.APIServerName, "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
+	startCmd.Flags().String(apiServerName, constants.APIServerName, "The authoritative apiserver hostname for apiserver certificates and connectivity. This can be used if you want to make the apiserver available from outside the machine")
 	startCmd.Flags().StringArrayVar(&apiServerNames, "apiserver-names", nil, "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
 	startCmd.Flags().IPSliceVar(&apiServerIPs, "apiserver-ips", nil, "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
 }
|
|||
}
|
||||
}
|
||||
|
||||
kubeconfig := node.Start(cc, n, existingAddons, true)
|
||||
kubeconfig, err := node.Start(cc, n, existingAddons, true)
|
||||
if err != nil {
|
||||
kubeconfig = maybeDeleteAndRetry(cc, n, existingAddons, err)
|
||||
}
|
||||
|
||||
numNodes := viper.GetInt(nodes)
|
||||
if numNodes == 1 && existing != nil {
|
||||
|
@ -371,6 +377,7 @@ func runStart(cmd *cobra.Command, args []string) {
|
|||
ControlPlane: false,
|
||||
KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
|
||||
}
|
||||
out.Ln("") // extra newline for clarity on the command line
|
||||
err := node.Add(&cc, n)
|
||||
if err != nil {
|
||||
exit.WithError("adding node", err)
|
||||
|
@ -428,26 +435,16 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
|
|||
|
||||
path, err := exec.LookPath("kubectl")
|
||||
if err != nil {
|
||||
out.T(out.Tip, "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
|
||||
out.ErrT(out.Tip, "For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/")
|
||||
return nil
|
||||
}
|
||||
|
||||
j, err := exec.Command(path, "version", "--client", "--output=json").Output()
|
||||
gitVersion, err := kubectlVersion(path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "exec")
|
||||
return err
|
||||
}
|
||||
|
||||
cv := struct {
|
||||
ClientVersion struct {
|
||||
GitVersion string `json:"gitVersion"`
|
||||
} `json:"clientVersion"`
|
||||
}{}
|
||||
err = json.Unmarshal(j, &cv)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unmarshal")
|
||||
}
|
||||
|
||||
client, err := semver.Make(strings.TrimPrefix(cv.ClientVersion.GitVersion, version.VersionPrefix))
|
||||
client, err := semver.Make(strings.TrimPrefix(gitVersion, version.VersionPrefix))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "client semver")
|
||||
}
|
||||
|
@ -458,14 +455,71 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
|
|||
|
||||
if client.Major != cluster.Major || minorSkew > 1 {
|
||||
out.Ln("")
|
||||
out.T(out.Warning, "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.",
|
||||
out.WarningT("{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.",
|
||||
out.V{"path": path, "client_version": client, "cluster_version": cluster})
|
||||
out.T(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version",
|
||||
out.ErrT(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version",
|
||||
out.V{"path": path, "client_version": client})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func maybeDeleteAndRetry(cc config.ClusterConfig, n config.Node, existingAddons map[string]bool, originalErr error) *kubeconfig.Settings {
|
||||
if viper.GetBool(deleteOnFailure) {
|
||||
out.WarningT("Node {{.name}} failed to start, deleting and trying again.", out.V{"name": n.Name})
|
||||
// Start failed, delete the cluster and try again
|
||||
profile, err := config.LoadProfile(cc.Name)
|
||||
if err != nil {
|
||||
out.ErrT(out.Meh, `"{{.name}}" profile does not exist, trying anyways.`, out.V{"name": cc.Name})
|
||||
}
|
||||
|
||||
err = deleteProfile(profile)
|
||||
if err != nil {
|
||||
out.WarningT("Failed to delete cluster {{.name}}, proceeding with retry anyway.", out.V{"name": cc.Name})
|
||||
}
|
||||
|
||||
var kubeconfig *kubeconfig.Settings
|
||||
for _, v := range cc.Nodes {
|
||||
k, err := node.Start(cc, v, existingAddons, v.ControlPlane)
|
||||
if v.ControlPlane {
|
||||
kubeconfig = k
|
||||
}
|
||||
if err != nil {
|
||||
// Ok we failed again, let's bail
|
||||
exit.WithError("Start failed after cluster deletion", err)
|
||||
}
|
||||
}
|
||||
return kubeconfig
|
||||
}
|
||||
// Don't delete the cluster unless they ask
|
||||
exit.WithError("startup failed", originalErr)
|
||||
return nil
|
||||
}
|
||||
|
||||
func kubectlVersion(path string) (string, error) {
|
||||
j, err := exec.Command(path, "version", "--client", "--output=json").Output()
|
||||
if err != nil {
|
||||
// really old kubernetes clients did not have the --output parameter
|
||||
b, err := exec.Command(path, "version", "--client", "--short").Output()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "exec")
|
||||
}
|
||||
s := strings.TrimSpace(string(b))
|
||||
return strings.Replace(s, "Client Version: ", "", 1), nil
|
||||
}
|
||||
|
||||
cv := struct {
|
||||
ClientVersion struct {
|
||||
GitVersion string `json:"gitVersion"`
|
||||
} `json:"clientVersion"`
|
||||
}{}
|
||||
err = json.Unmarshal(j, &cv)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "unmarshal")
|
||||
}
|
||||
|
||||
return cv.ClientVersion.GitVersion, nil
|
||||
}
|
||||
|
||||
func selectDriver(existing *config.ClusterConfig) registry.DriverState {
|
||||
// Technically unrelated, but important to perform before detection
|
||||
driver.SetLibvirtURI(viper.GetString(kvmQemuURI))
|
||||
|
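kubectlVersion above probes --output=json first and falls back to --short for clients that predate the flag (the fix from #7356). The two parsing paths in isolation, fed canned strings instead of a real kubectl:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// parseKubectlVersion extracts gitVersion from the output of
// `kubectl version --client --output=json`.
func parseKubectlVersion(jsonOut []byte) (string, error) {
	cv := struct {
		ClientVersion struct {
			GitVersion string `json:"gitVersion"`
		} `json:"clientVersion"`
	}{}
	if err := json.Unmarshal(jsonOut, &cv); err != nil {
		return "", err
	}
	return cv.ClientVersion.GitVersion, nil
}

// shortFallback handles old clients: `kubectl version --client --short`
// prints a line of the form "Client Version: v1.11.10".
func shortFallback(shortOut string) string {
	return strings.Replace(strings.TrimSpace(shortOut), "Client Version: ", "", 1)
}

func main() {
	v, err := parseKubectlVersion([]byte(`{"clientVersion":{"gitVersion":"v1.18.0"}}`))
	fmt.Println(v, err)
	fmt.Println(shortFallback("Client Version: v1.11.10\n"))
}
```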
@ -488,7 +542,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
|
|||
|
||||
If vm-driver is set in the global config, please run "minikube config unset vm-driver" to resolve this warning.
|
||||
`
|
||||
out.T(out.Warning, warning, out.V{"driver": d, "vmd": vmd})
|
||||
out.WarningT(warning, out.V{"driver": d, "vmd": vmd})
|
||||
}
|
||||
ds := driver.Status(d)
|
||||
if ds.Name == "" {
|
||||
|
@ -508,9 +562,15 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
|
|||
return ds
|
||||
}
|
||||
|
||||
pick, alts := driver.Suggest(driver.Choices(viper.GetBool("vm")))
|
||||
choices := driver.Choices(viper.GetBool("vm"))
|
||||
pick, alts, rejects := driver.Suggest(choices)
|
||||
if pick.Name == "" {
|
||||
exit.WithCodeT(exit.Config, "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/")
|
||||
out.T(out.ThumbsDown, "Unable to pick a default driver. Here is what was considered, in preference order:")
|
||||
for _, r := range rejects {
|
||||
out.T(out.Option, "{{ .name }}: {{ .rejection }}", out.V{"name": r.Name, "rejection": r.Rejection})
|
||||
}
|
||||
out.T(out.Workaround, "Try specifying a --driver, or see https://minikube.sigs.k8s.io/docs/start/")
|
||||
os.Exit(exit.Unavailable)
|
||||
}
|
||||
|
||||
if len(alts) > 1 {
|
||||
|
@ -691,9 +751,9 @@ func validateUser(drvName string) {
|
|||
return
|
||||
}
|
||||
|
||||
out.T(out.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
|
||||
out.T(out.Tip, "If you are running minikube within a VM, consider using --driver=none:")
|
||||
out.T(out.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
|
||||
out.ErrT(out.Stopped, `The "{{.driver_name}}" driver should not be used with root privileges.`, out.V{"driver_name": drvName})
|
||||
out.ErrT(out.Tip, "If you are running minikube within a VM, consider using --driver=none:")
|
||||
out.ErrT(out.Documentation, " https://minikube.sigs.k8s.io/docs/reference/drivers/none/")
|
||||
|
||||
if !useForce {
|
||||
os.Exit(exit.Permissions)
|
||||
|
@ -701,7 +761,7 @@ func validateUser(drvName string) {
|
|||
cname := ClusterFlagValue()
|
||||
_, err = config.Load(cname)
|
||||
if err == nil || !config.IsNotExist(err) {
|
||||
out.T(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}}", out.V{"cmd": mustload.ExampleCmd(cname, "delete")})
|
||||
out.ErrT(out.Tip, "Tip: To remove this root owned cluster, run: sudo {{.cmd}}", out.V{"cmd": mustload.ExampleCmd(cname, "delete")})
|
||||
}
|
||||
if !useForce {
|
||||
exit.WithCodeT(exit.Permissions, "Exiting")
|
||||
|
@ -1009,6 +1069,7 @@ func createNode(cmd *cobra.Command, k8sVersion, kubeNodeName, drvName, repositor
|
|||
},
|
||||
Nodes: []config.Node{cp},
|
||||
}
|
||||
cfg.VerifyComponents = interpretWaitFlag(*cmd)
|
||||
return cfg, cp, nil
|
||||
}
|
||||
|
||||
|
@ -1141,3 +1202,48 @@ func getKubernetesVersion(old *config.ClusterConfig) string {
|
|||
}
|
||||
return nv
|
||||
}
|
||||
|
||||
// interpretWaitFlag interprets the wait flag and respects the legacy minikube users
|
||||
// returns map of components to wait for
|
||||
func interpretWaitFlag(cmd cobra.Command) map[string]bool {
|
||||
if !cmd.Flags().Changed(waitComponents) {
|
||||
glog.Infof("Wait components to verify : %+v", kverify.DefaultComponents)
|
||||
return kverify.DefaultComponents
|
||||
}
|
||||
|
||||
waitFlags, err := cmd.Flags().GetStringSlice(waitComponents)
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to read --wait from flags: %v.\n Moving on will use the default wait components: %+v", err, kverify.DefaultComponents)
|
||||
return kverify.DefaultComponents
|
||||
}
|
||||
|
||||
if len(waitFlags) == 1 {
|
||||
// respecting legacy flag before minikube 1.9.0, wait flag was boolean
|
||||
if waitFlags[0] == "false" || waitFlags[0] == "none" {
|
||||
glog.Infof("Waiting for no components: %+v", kverify.NoComponents)
|
||||
return kverify.NoComponents
|
||||
}
|
||||
// respecting legacy flag before minikube 1.9.0, wait flag was boolean
|
||||
if waitFlags[0] == "true" || waitFlags[0] == "all" {
|
||||
glog.Infof("Waiting for all components: %+v", kverify.AllComponents)
|
||||
return kverify.AllComponents
|
||||
}
|
||||
}
|
||||
|
||||
waitComponents := kverify.NoComponents
|
||||
for _, wc := range waitFlags {
|
||||
seen := false
|
||||
for _, valid := range kverify.AllComponentsList {
|
||||
if wc == valid {
|
||||
waitComponents[wc] = true
|
||||
seen = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !seen {
|
||||
glog.Warningf("The value %q is invalid for --wait flag. valid options are %q", wc, strings.Join(kverify.AllComponentsList, ","))
|
||||
}
|
||||
}
|
||||
glog.Infof("Waiting for components: %+v", waitComponents)
|
||||
return waitComponents
|
||||
}
|
||||
|
|
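interpretWaitFlag keeps the pre-1.9.0 boolean semantics of --wait while accepting a component list. Here is the same decision table in a runnable form; the component names are illustrative, not the exact kverify lists:

```go
package main

import "fmt"

var allComponents = []string{"apiserver", "system_pods", "default_sa"}

// interpretWait mirrors the flag handling above: "true"/"all" waits for
// everything, "false"/"none" for nothing, otherwise each named component.
func interpretWait(flags []string) map[string]bool {
	components := map[string]bool{}
	if len(flags) == 1 {
		switch flags[0] {
		case "false", "none":
			return components
		case "true", "all":
			for _, c := range allComponents {
				components[c] = true
			}
			return components
		}
	}
	for _, wc := range flags {
		seen := false
		for _, valid := range allComponents {
			if wc == valid {
				components[wc] = true
				seen = true
			}
		}
		if !seen {
			fmt.Printf("ignoring invalid --wait value %q\n", wc)
		}
	}
	return components
}

func main() {
	fmt.Println(interpretWait([]string{"true"}))
	fmt.Println(interpretWait([]string{"apiserver", "bogus"}))
}
```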
@@ -32,7 +32,6 @@ import (
 	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
 	"k8s.io/minikube/pkg/minikube/cluster"
 	"k8s.io/minikube/pkg/minikube/config"
-	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/kubeconfig"
@@ -187,33 +186,18 @@ func status(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status
 	}
 
 	// We have a fully operational host, now we can check for details
-	ip, err := cluster.GetHostDriverIP(api, name)
-	if err != nil {
-		glog.Errorln("Error host driver ip status:", err)
-		st.APIServer = state.Error.String()
+	if _, err := cluster.GetHostDriverIP(api, name); err != nil {
+		glog.Errorf("failed to get driver ip: %v", err)
+		st.Host = state.Error.String()
 		return st, err
 	}
 
-	port, err := kubeconfig.Port(name)
-	if err != nil {
-		glog.Warningf("unable to get port: %v", err)
-		port = constants.APIServerPort
-	}
-
-	st.Kubeconfig = Misconfigured
+	st.Kubeconfig = Configured
 	if !controlPlane {
 		st.Kubeconfig = Irrelevant
 		st.APIServer = Irrelevant
 	}
 
-	if st.Kubeconfig != Irrelevant {
-		ok, err := kubeconfig.IsClusterInConfig(ip, cc.Name)
-		glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err)
-		if ok {
-			st.Kubeconfig = Configured
-		}
-	}
-
 	host, err := machine.LoadHost(api, name)
 	if err != nil {
 		return st, err
@@ -234,18 +218,33 @@ func status(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status
 		st.Kubelet = stk.String()
 	}
 
-	if st.APIServer != Irrelevant {
-		sta, err := kverify.APIServerStatus(cr, ip, port)
-		glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err)
+	// Early exit for regular nodes
+	if !controlPlane {
+		return st, nil
+	}
 
-		if err != nil {
-			glog.Errorln("Error apiserver status:", err)
-			st.APIServer = state.Error.String()
-		} else {
-			st.APIServer = sta.String()
+	hostname, _, port, err := driver.ControlPaneEndpoint(&cc, &n, host.DriverName)
+	if err != nil {
+		glog.Errorf("forwarded endpoint: %v", err)
+		st.Kubeconfig = Misconfigured
+	} else {
+		err := kubeconfig.VerifyEndpoint(cc.Name, hostname, port)
+		if err != nil {
+			glog.Errorf("kubeconfig endpoint: %v", err)
+			st.Kubeconfig = Misconfigured
 		}
 	}
 
+	sta, err := kverify.APIServerStatus(cr, hostname, port)
+	glog.Infof("%s apiserver status = %s (err=%v)", name, stk, err)
+
+	if err != nil {
+		glog.Errorln("Error apiserver status:", err)
+		st.APIServer = state.Error.String()
+	} else {
+		st.APIServer = sta.String()
+	}
+
 	return st, nil
 }
@@ -59,7 +59,7 @@ func runStop(cmd *cobra.Command, args []string) {
 	}
 
 	if err := killMountProcess(); err != nil {
-		out.T(out.Warning, "Unable to kill mount process: {{.error}}", out.V{"error": err})
+		out.WarningT("Unable to kill mount process: {{.error}}", out.V{"error": err})
 	}
 
 	if err := kubeconfig.UnsetCurrentContext(cname, kubeconfig.PathFromEnv()); err != nil {
@@ -88,7 +88,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool
 		}
 	}
 
-	if err := retry.Expo(tryStop, 1*time.Second, 30*time.Second, 3); err != nil {
+	if err := retry.Expo(tryStop, 1*time.Second, 120*time.Second, 5); err != nil {
 		exit.WithError("Unable to stop VM", err)
 	}
 
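The stop path now gives a slow VM up to 120 seconds and 5 attempts instead of 30 seconds and 3 (#7363). A sketch of the exponential-backoff shape behind retry.Expo; the real helper's signature may differ from this one:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// expoRetry retries fn with exponentially growing sleeps, capping each
// step and bounding the total time, the same shape as retry.Expo.
func expoRetry(fn func() error, initial, total, maxStep time.Duration) error {
	deadline := time.Now().Add(total)
	sleep := initial
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if time.Now().Add(sleep).After(deadline) {
			return err // out of time budget, surface the last error
		}
		time.Sleep(sleep)
		if sleep *= 2; sleep > maxStep {
			sleep = maxStep
		}
	}
}

func main() {
	calls := 0
	err := expoRetry(func() error {
		calls++
		if calls < 3 {
			return errors.New("vm still shutting down")
		}
		return nil
	}, 10*time.Millisecond, time.Second, 100*time.Millisecond)
	fmt.Println("stopped after", calls, "attempts, err =", err)
}
```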
@@ -33,14 +33,15 @@ var updateContextCmd = &cobra.Command{
 	Run: func(cmd *cobra.Command, args []string) {
 		cname := ClusterFlagValue()
 		co := mustload.Running(cname)
-		updated, err := kubeconfig.UpdateIP(co.DriverIP, cname, kubeconfig.PathFromEnv())
+
+		updated, err := kubeconfig.UpdateEndpoint(cname, co.CP.Hostname, co.CP.Port, kubeconfig.PathFromEnv())
 		if err != nil {
 			exit.WithError("update config", err)
 		}
 		if updated {
-			out.T(out.Celebrate, "{{.cluster}} IP has been updated to point at {{.ip}}", out.V{"cluster": cname, "ip": co.DriverIP})
+			out.T(out.Celebrate, `"{{.context}}" context has been updated to point to {{.hostname}}:{{.port}}`, out.V{"context": cname, "hostname": co.CP.Hostname, "port": co.CP.Port})
 		} else {
-			out.T(out.Meh, "{{.cluster}} IP was already correctly configured for {{.ip}}", out.V{"cluster": cname, "ip": co.DriverIP})
+			out.T(out.Meh, `No changes required for the "{{.context}}" context`, out.V{"context": cname})
 		}
 
 	},
@@ -17,20 +17,56 @@ limitations under the License.
 package cmd
 
 import (
+	"encoding/json"
+
 	"github.com/spf13/cobra"
+	"gopkg.in/yaml.v2"
+	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/out"
 	"k8s.io/minikube/pkg/version"
 )
 
+var (
+	versionOutput string
+	shortVersion  bool
+)
+
 var versionCmd = &cobra.Command{
 	Use:   "version",
 	Short: "Print the version of minikube",
 	Long:  `Print the version of minikube.`,
 	Run: func(command *cobra.Command, args []string) {
-		out.Ln("minikube version: %v", version.GetVersion())
+		minikubeVersion := version.GetVersion()
 		gitCommitID := version.GetGitCommitID()
-		if gitCommitID != "" {
-			out.Ln("commit: %v", gitCommitID)
+		data := map[string]string{
+			"minikubeVersion": minikubeVersion,
+			"commit":          gitCommitID,
+		}
+		switch versionOutput {
+		case "":
+			out.Ln("minikube version: %v", minikubeVersion)
+			if !shortVersion && gitCommitID != "" {
+				out.Ln("commit: %v", gitCommitID)
+			}
+		case "json":
+			json, err := json.Marshal(data)
+			if err != nil {
+				exit.WithError("version json failure", err)
+			}
+			out.Ln(string(json))
+		case "yaml":
+			yaml, err := yaml.Marshal(data)
+			if err != nil {
+				exit.WithError("version yaml failure", err)
+			}
+			out.Ln(string(yaml))
+		default:
+			exit.WithCodeT(exit.BadUsage, "error: --output must be 'yaml' or 'json'")
 		}
 	},
 }
+
+func init() {
+	versionCmd.Flags().StringVarP(&versionOutput, "output", "o", "", "One of 'yaml' or 'json'.")
+	versionCmd.Flags().BoolVar(&shortVersion, "short", false, "Print just the version number.")
+}
@@ -26,7 +26,6 @@ import (
 	"testing"
 
 	retryablehttp "github.com/hashicorp/go-retryablehttp"
-	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/notify"
 	"k8s.io/minikube/pkg/util"
 )
@@ -1,4 +1,12 @@
 [
+  {
+    "name": "v1.9.1",
+    "checksums": {
+      "darwin": "ac8855ea54e798fa6f00e8c251b55c3d2a54e3b80e896162958a5ac7b0e3f60b",
+      "linux": "7174c881289a7302a05d477c67cc1ef5b48153e825089d6c0d0bcfaebe33d42a",
+      "windows": "91d15b2ef8f357aa463ae16de59f6e018120398f492ba4e35cd77f21acb27d5c"
+    }
+  },
   {
     "name": "v1.9.0",
     "checksums": {
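Each release entry carries per-platform sha256 sums so downloads can be verified. Checking a binary against such an entry needs only the standard library; the payload below is a placeholder, so the comparison intentionally fails:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// verifyChecksum compares downloaded bytes against the per-platform
// sha256 recorded in releases.json (the linux sum from the entry above).
func verifyChecksum(data []byte, want string) error {
	sum := sha256.Sum256(data)
	if got := hex.EncodeToString(sum[:]); got != want {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, want)
	}
	return nil
}

func main() {
	payload := []byte("downloaded minikube binary bytes")
	err := verifyChecksum(payload, "7174c881289a7302a05d477c67cc1ef5b48153e825089d6c0d0bcfaebe33d42a")
	fmt.Println(err) // mismatch expected for this placeholder payload
}
```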
go.mod
@@ -82,6 +82,7 @@ require (
 	google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24 // indirect
 	google.golang.org/grpc v1.26.0 // indirect
 	gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
+	gopkg.in/yaml.v2 v2.2.8
 	gotest.tools/v3 v3.0.2 // indirect
 	k8s.io/api v0.17.3
 	k8s.io/apimachinery v0.17.3
@@ -45,6 +45,9 @@ export HOMEBREW_GITHUB_API_TOKEN="${access_token}"
 # sh -c "$(curl -fsSL https://raw.githubusercontent.com/Linuxbrew/install/master/install.sh)"
 export PATH=/home/linuxbrew/.linuxbrew/bin:$PATH
 
+# avoid "error: you need to resolve your current index first" message
+cd "${SRC_DIR}"
+
 brew bump-formula-pr \
     --strict minikube \
     --revision="${revision}" \
@@ -19,6 +19,7 @@ package addons
 import (
 	"fmt"
 	"path"
+	"runtime"
 	"sort"
 	"strconv"
 	"strings"
@@ -122,6 +123,17 @@ func enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) err
 		}
 	}
 
+	// to match both ingress and ingress-dns addons
+	if strings.HasPrefix(name, "ingress") && enable && driver.IsKIC(cc.Driver) && runtime.GOOS != "linux" {
+		exit.UsageT(`Due to {{.driver_name}} networking limitations on {{.os_name}}, {{.addon_name}} addon is not supported for this driver.
+Alternatively to use this addon you can use a vm-based driver:
+
+	'minikube start --vm=true'
+
+To track the update on this work in progress feature please check:
+https://github.com/kubernetes/minikube/issues/7332`, out.V{"driver_name": cc.Driver, "os_name": runtime.GOOS, "addon_name": name})
+	}
+
 	if strings.HasPrefix(name, "istio") && enable {
 		minMem := 8192
 		minCPUs := 4
@@ -234,21 +234,26 @@ func ContainerID(ociBinary string, nameOrID string) (string, error) {
 }
 
 // WarnIfSlow runs an oci command, warning about performance issues
-func WarnIfSlow(arg ...string) ([]byte, error) {
-	killTime := 15 * time.Second
+func WarnIfSlow(args ...string) ([]byte, error) {
+	killTime := 19 * time.Second
 	warnTime := 2 * time.Second
 
+	if args[1] == "volume" || args[1] == "ps" { // volume and ps requires more time than inspect
+		killTime = 30 * time.Second
+		warnTime = 3 * time.Second
+	}
+
 	ctx, cancel := context.WithTimeout(context.Background(), killTime)
 	defer cancel()
 
 	start := time.Now()
-	glog.Infof("executing with %s timeout: %v", arg, killTime)
-	cmd := exec.CommandContext(ctx, arg[0], arg[1:]...)
+	glog.Infof("executing with %s timeout: %v", args, killTime)
+	cmd := exec.CommandContext(ctx, args[0], args[1:]...)
 	stdout, err := cmd.Output()
 	d := time.Since(start)
 	if d > warnTime {
 		out.WarningT(`Executing "{{.command}}" took an unusually long time: {{.duration}}`, out.V{"command": strings.Join(cmd.Args, " "), "duration": d})
-		out.T(out.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": arg[0]})
+		out.ErrT(out.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": args[0]})
 	}
 
 	if ctx.Err() == context.DeadlineExceeded {
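WarnIfSlow now budgets `ps` and `volume` more generously (30s kill, 3s warn) than other oci calls (19s, 2s), per #7410. The underlying pattern, a soft warning threshold plus a hard context deadline, in standalone form:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

// runWithBudget executes a command under a hard kill deadline and prints
// a warning when it exceeds a soft threshold, the WarnIfSlow pattern above.
func runWithBudget(warnTime, killTime time.Duration, name string, args ...string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), killTime)
	defer cancel()

	start := time.Now()
	out, err := exec.CommandContext(ctx, name, args...).Output()
	if d := time.Since(start); d > warnTime {
		fmt.Printf("%q took an unusually long time: %v\n", name, d)
	}
	if ctx.Err() == context.DeadlineExceeded {
		return nil, fmt.Errorf("%q timed out after %v", name, killTime)
	}
	return out, err
}

func main() {
	out, err := runWithBudget(3*time.Second, 30*time.Second, "docker", "ps")
	fmt.Println(len(out), err)
}
```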
@@ -18,7 +18,6 @@ package none
 
 import (
 	"fmt"
-	"net"
 	"os/exec"
 
 	"github.com/docker/machine/libmachine/drivers"
@@ -126,20 +125,14 @@ func (d *Driver) GetURL() (string, error) {
 
 // GetState returns the state that the host is in (running, stopped, etc)
 func (d *Driver) GetState() (state.State, error) {
 	glog.Infof("GetState called")
-	ip, err := d.GetIP()
-	if err != nil {
-		return state.Error, err
-	}
-
-	port, err := kubeconfig.Port(d.BaseDriver.MachineName)
+	hostname, port, err := kubeconfig.Endpoint(d.BaseDriver.MachineName)
 	if err != nil {
 		glog.Warningf("unable to get port: %v", err)
 		port = constants.APIServerPort
 	}
 
 	// Confusing logic, as libmachine.Stop will loop until the state == Stopped
-	ast, err := kverify.APIServerStatus(d.exec, net.ParseIP(ip), port)
+	ast, err := kverify.APIServerStatus(d.exec, hostname, port)
 	if err != nil {
 		return ast, err
 	}
@@ -17,7 +17,6 @@ limitations under the License.
 package bootstrapper
 
 import (
-	"net"
 	"time"
 
 	"k8s.io/minikube/pkg/minikube/bootstrapper/images"
@@ -47,7 +46,7 @@ type Bootstrapper interface {
 	LogCommands(config.ClusterConfig, LogOptions) map[string]string
 	SetupCerts(config.KubernetesConfig, config.Node) error
 	GetKubeletStatus() (string, error)
-	GetAPIServerStatus(net.IP, int) (string, error)
+	GetAPIServerStatus(string, int) (string, error)
 }
 
 const (
@@ -64,6 +64,8 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
   nodefs.inodesFree: "0%"
@@ -73,6 +73,7 @@ networking:
 ---
 apiVersion: kubelet.config.k8s.io/v1beta1
 kind: KubeletConfiguration
 # disable disk resource management by default
+imageGCHighThresholdPercent: 100
 evictionHard:
   nodefs.available: "0%"
@@ -68,4 +68,17 @@ networking:
   dnsDomain: {{if .DNSDomain}}{{.DNSDomain}}{{else}}cluster.local{{end}}
   podSubnet: "{{.PodSubnet }}"
   serviceSubnet: {{.ServiceCIDR}}
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# disable disk resource management by default
+imageGCHighThresholdPercent: 100
+evictionHard:
+  nodefs.available: "0%"
+  nodefs.inodesFree: "0%"
+  imagefs.available: "0%"
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+metricsBindAddress: {{.AdvertiseAddress}}:10249
 `))
@@ -0,0 +1,206 @@
+/*
+Copyright 2020 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kverify verifies a running kubernetes cluster is healthy
+package kverify
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net"
+	"net/http"
+	"os/exec"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/machine/libmachine/state"
+	"github.com/golang/glog"
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/version"
+	"k8s.io/client-go/kubernetes"
+	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
+	"k8s.io/minikube/pkg/minikube/bootstrapper"
+	"k8s.io/minikube/pkg/minikube/command"
+	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/cruntime"
+)
+
+// WaitForAPIServerProcess waits for api server to be healthy returns error if it doesn't
+func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, timeout time.Duration) error {
+	glog.Infof("waiting for apiserver process to appear ...")
+	err := wait.PollImmediate(time.Millisecond*500, timeout, func() (bool, error) {
+		if time.Since(start) > timeout {
+			return false, fmt.Errorf("cluster wait timed out during process check")
+		}
+
+		if time.Since(start) > minLogCheckTime {
+			announceProblems(r, bs, cfg, cr)
+			time.Sleep(kconst.APICallRetryInterval * 5)
+		}
+
+		if _, ierr := apiServerPID(cr); ierr != nil {
+			return false, nil
+		}
+
+		return true, nil
+	})
+	if err != nil {
+		return fmt.Errorf("apiserver process never appeared")
+	}
+	glog.Infof("duration metric: took %s to wait for apiserver process to appear ...", time.Since(start))
+	return nil
+}
+
+// apiServerPID returns our best guess to the apiserver pid
+func apiServerPID(cr command.Runner) (int, error) {
+	rr, err := cr.RunCmd(exec.Command("sudo", "pgrep", "-xnf", "kube-apiserver.*minikube.*"))
+	if err != nil {
+		return 0, err
+	}
+	s := strings.TrimSpace(rr.Stdout.String())
+	return strconv.Atoi(s)
+}
+
+// WaitForHealthyAPIServer waits for api server status to be running
+func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, hostname string, port int, timeout time.Duration) error {
+	glog.Infof("waiting for apiserver healthz status ...")
+	hStart := time.Now()
+
+	healthz := func() (bool, error) {
+		if time.Since(start) > timeout {
+			return false, fmt.Errorf("cluster wait timed out during healthz check")
+		}
+
+		if time.Since(start) > minLogCheckTime {
+			announceProblems(r, bs, cfg, cr)
+			time.Sleep(kconst.APICallRetryInterval * 5)
+		}
+
+		status, err := apiServerHealthz(hostname, port)
+		if err != nil {
+			glog.Warningf("status: %v", err)
+			return false, nil
+		}
+		if status != state.Running {
+			return false, nil
+		}
+		return true, nil
+	}
+
+	if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, healthz); err != nil {
+		return fmt.Errorf("apiserver healthz never reported healthy")
+	}
+
+	vcheck := func() (bool, error) {
+		if time.Since(start) > timeout {
+			return false, fmt.Errorf("cluster wait timed out during version check")
+		}
+		if err := APIServerVersionMatch(client, cfg.KubernetesConfig.KubernetesVersion); err != nil {
+			glog.Warningf("api server version match failed: %v", err)
+			return false, nil
+		}
+		return true, nil
+	}
+
+	if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, vcheck); err != nil {
+		return fmt.Errorf("controlPlane never updated to %s", cfg.KubernetesConfig.KubernetesVersion)
+	}
+
+	glog.Infof("duration metric: took %s to wait for apiserver health ...", time.Since(hStart))
+	return nil
+}
+
+// APIServerVersionMatch checks if the server version matches the expected
+func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error {
+	vi, err := client.ServerVersion()
+	if err != nil {
+		return errors.Wrap(err, "server version")
+	}
+	glog.Infof("control plane version: %s", vi)
+	if version.CompareKubeAwareVersionStrings(vi.String(), expected) != 0 {
+		return fmt.Errorf("controlPane = %q, expected: %q", vi.String(), expected)
+	}
+	return nil
+}
+
+// APIServerStatus returns apiserver status in libmachine style state.State
+func APIServerStatus(cr command.Runner, hostname string, port int) (state.State, error) {
+	glog.Infof("Checking apiserver status ...")
+
+	pid, err := apiServerPID(cr)
+	if err != nil {
+		glog.Warningf("stopped: unable to get apiserver pid: %v", err)
+		return state.Stopped, nil
+	}
+
+	// Get the freezer cgroup entry for this pid
+	rr, err := cr.RunCmd(exec.Command("sudo", "egrep", "^[0-9]+:freezer:", fmt.Sprintf("/proc/%d/cgroup", pid)))
+	if err != nil {
+		glog.Warningf("unable to find freezer cgroup: %v", err)
+		return apiServerHealthz(hostname, port)
+	}
+	freezer := strings.TrimSpace(rr.Stdout.String())
+	glog.Infof("apiserver freezer: %q", freezer)
+	fparts := strings.Split(freezer, ":")
+	if len(fparts) != 3 {
+		glog.Warningf("unable to parse freezer - found %d parts: %s", len(fparts), freezer)
+		return apiServerHealthz(hostname, port)
+	}
+
+	rr, err = cr.RunCmd(exec.Command("sudo", "cat", path.Join("/sys/fs/cgroup/freezer", fparts[2], "freezer.state")))
+	if err != nil {
+		glog.Errorf("unable to get freezer state: %s", rr.Stderr.String())
+		return apiServerHealthz(hostname, port)
+	}
+
+	fs := strings.TrimSpace(rr.Stdout.String())
+	glog.Infof("freezer state: %q", fs)
+	if fs == "FREEZING" || fs == "FROZEN" {
+		return state.Paused, nil
+	}
+	return apiServerHealthz(hostname, port)
+}
+
+// apiServerHealthz hits the /healthz endpoint and returns libmachine style state.State
+func apiServerHealthz(hostname string, port int) (state.State, error) {
+	url := fmt.Sprintf("https://%s/healthz", net.JoinHostPort(hostname, fmt.Sprint(port)))
+	glog.Infof("Checking apiserver healthz at %s ...", url)
+	// To avoid: x509: certificate signed by unknown authority
+	tr := &http.Transport{
+		Proxy:           nil, // To avoid connectivity issue if http(s)_proxy is set.
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+	}
+	client := &http.Client{Transport: tr}
|
||||
resp, err := client.Get(url)
|
||||
// Connection refused, usually.
|
||||
if err != nil {
|
||||
glog.Infof("stopped: %s: %v", url, err)
|
||||
return state.Stopped, nil
|
||||
}
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
glog.Errorf("%s returned code %d (unauthorized). Please ensure that your apiserver authorization settings make sense!", url, resp.StatusCode)
|
||||
return state.Error, nil
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
glog.Warningf("%s response: %v %+v", url, err, resp)
|
||||
return state.Error, nil
|
||||
}
|
||||
return state.Running, nil
|
||||
}
|
|
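Taken together, these helpers distinguish a paused apiserver (frozen freezer cgroup) from a stopped or unhealthy one. A minimal caller sketch — the import path for kverify, the runner wiring, and the endpoint values are assumptions for illustration, not part of this diff:

```go
package example

import (
	"fmt"

	// assumed import path for the kverify package shown above
	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
	"k8s.io/minikube/pkg/minikube/command"
)

// reportAPIServer polls the apiserver once and prints its libmachine-style state.
// runner is assumed to be an already-configured command.Runner for the node;
// the host and port below are made-up values.
func reportAPIServer(runner command.Runner) error {
	st, err := kverify.APIServerStatus(runner, "192.168.39.100", 8443)
	if err != nil {
		return err
	}
	fmt.Printf("apiserver: %s\n", st) // Running, Stopped, Paused, or Error
	return nil
}
```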
@ -0,0 +1,56 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package kverify verifies a running kubernetes cluster is healthy
package kverify

import (
	"fmt"
	"time"

	"github.com/golang/glog"
	"github.com/pkg/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/minikube/pkg/util/retry"
)

// WaitForDefaultSA waits for the default service account to be created.
func WaitForDefaultSA(cs *kubernetes.Clientset, timeout time.Duration) error {
	glog.Info("waiting for default service account to be created ...")
	start := time.Now()
	saReady := func() error {
		// equivalent to manual check of 'kubectl --context profile get serviceaccount default'
		sas, err := cs.CoreV1().ServiceAccounts("default").List(meta.ListOptions{})
		if err != nil {
			glog.Infof("temporary error waiting for default SA: %v", err)
			return err
		}
		for _, sa := range sas.Items {
			if sa.Name == "default" {
				glog.Infof("found service account: %q", sa.Name)
				return nil
			}
		}
		return fmt.Errorf("couldn't find default service account")
	}
	if err := retry.Expo(saReady, 500*time.Millisecond, timeout); err != nil {
		return errors.Wrapf(err, "waited %s for SA", time.Since(start))
	}

	glog.Infof("duration metric: took %s for default service account to be created ...", time.Since(start))
	return nil
}
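Note that the helper retries with exponential backoff (retry.Expo) rather than a fixed-interval poll. A hedged sketch of a caller — constructing the clientset is assumed and is not part of this change:

```go
package example

import (
	"time"

	"k8s.io/client-go/kubernetes"
	// assumed import path for the kverify package shown above
	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
)

// waitForSA gives the controller-manager up to two minutes (an illustrative
// value) to create the "default" ServiceAccount before deployments can run.
func waitForSA(cs *kubernetes.Clientset) error {
	return kverify.WaitForDefaultSA(cs, 2*time.Minute)
}
```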
@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2020 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -18,23 +18,15 @@ limitations under the License.
package kverify

import (
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"os/exec"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/docker/machine/libmachine/state"
	"github.com/golang/glog"
	"github.com/pkg/errors"
	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/client-go/kubernetes"
	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
	"k8s.io/minikube/pkg/minikube/bootstrapper"
@ -47,40 +39,37 @@ import (
// minLogCheckTime how long to wait before spamming error logs to console
const minLogCheckTime = 60 * time.Second

// WaitForAPIServerProcess waits for the apiserver process to appear; it returns an error if it doesn't
func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, timeout time.Duration) error {
	glog.Infof("waiting for apiserver process to appear ...")
	err := wait.PollImmediate(time.Millisecond*500, timeout, func() (bool, error) {
		if time.Since(start) > timeout {
			return false, fmt.Errorf("cluster wait timed out during process check")
		}
const (
	// APIServerWaitKey is the name used in the flags for k8s api server
	APIServerWaitKey = "apiserver"
	// SystemPodsWaitKey is the name used in the flags for pods in the kube system
	SystemPodsWaitKey = "system_pods"
	// DefaultSAWaitKey is the name used in the flags for default service account
	DefaultSAWaitKey = "default_sa"
)

		if time.Since(start) > minLogCheckTime {
			announceProblems(r, bs, cfg, cr)
			time.Sleep(kconst.APICallRetryInterval * 5)
		}
// vars related to the --wait flag
var (
	// DefaultComponents is a map of the default components to wait for
	DefaultComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true}
	// NoComponents is a map of components to skip waiting for, when 'none' or 'false' is specified
	NoComponents = map[string]bool{APIServerWaitKey: false, SystemPodsWaitKey: false, DefaultSAWaitKey: false}
	// AllComponents is a map for waiting for all components.
	AllComponents = map[string]bool{APIServerWaitKey: true, SystemPodsWaitKey: true, DefaultSAWaitKey: true}
	// DefaultWaitList is a list of all default components to wait for; only the names are used for start flags.
	DefaultWaitList = []string{APIServerWaitKey, SystemPodsWaitKey}
	// AllComponentsList is a list of all valid component keys to wait for; only the names are used for start flags.
	AllComponentsList = []string{APIServerWaitKey, SystemPodsWaitKey, DefaultSAWaitKey}
)

		if _, ierr := apiServerPID(cr); ierr != nil {
			return false, nil
// ShouldWait will return true if the config says we need to wait
func ShouldWait(wcs map[string]bool) bool {
	for _, c := range AllComponentsList {
		if wcs[c] {
			return true
		}

		return true, nil
	})
	if err != nil {
		return fmt.Errorf("apiserver process never appeared")
	}
	glog.Infof("duration metric: took %s to wait for apiserver process to appear ...", time.Since(start))
	return nil
}

// apiServerPID returns our best guess at the apiserver pid
func apiServerPID(cr command.Runner) (int, error) {
	rr, err := cr.RunCmd(exec.Command("sudo", "pgrep", "-xnf", "kube-apiserver.*minikube.*"))
	if err != nil {
		return 0, err
	}
	s := strings.TrimSpace(rr.Stdout.String())
	return strconv.Atoi(s)
	return false
}

// ExpectedComponentsRunning returns whether or not all expected components are running
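With the component keys in place, a caller can first check whether it needs to block at all. A small sketch of the intended use; in practice the map would come from parsing the --wait start flag, and the kverify import path is an assumption:

```go
package main

import (
	"fmt"

	// assumed import path for the kverify package shown above
	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
)

func main() {
	// DefaultComponents enables apiserver and system_pods; default_sa stays off.
	fmt.Println(kverify.ShouldWait(kverify.DefaultComponents)) // true
	fmt.Println(kverify.ShouldWait(kverify.NoComponents))      // false
}
```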
@ -145,105 +134,6 @@ func podStatusMsg(pod core.Pod) string {
	return sb.String()
}

// WaitForSystemPods verifies that the essential pods for a running kubernetes cluster are running
func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error {
	glog.Info("waiting for kube-system pods to appear ...")
	pStart := time.Now()

	podList := func() (bool, error) {
		if time.Since(start) > timeout {
			return false, fmt.Errorf("cluster wait timed out during pod check")
		}
		if time.Since(start) > minLogCheckTime {
			announceProblems(r, bs, cfg, cr)
			time.Sleep(kconst.APICallRetryInterval * 5)
		}

		// Wait for any system pod, as waiting for apiserver may block until etcd
		pods, err := client.CoreV1().Pods("kube-system").List(meta.ListOptions{})
		if err != nil {
			glog.Warningf("pod list returned error: %v", err)
			return false, nil
		}
		glog.Infof("%d kube-system pods found", len(pods.Items))
		for _, pod := range pods.Items {
			glog.Infof(podStatusMsg(pod))
		}

		if len(pods.Items) < 2 {
			return false, nil
		}
		return true, nil
	}
	if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, podList); err != nil {
		return fmt.Errorf("apiserver never returned a pod list")
	}
	glog.Infof("duration metric: took %s to wait for pod list to return data ...", time.Since(pStart))
	return nil
}

// WaitForHealthyAPIServer waits for the apiserver status to be Running
func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, ip string, port int, timeout time.Duration) error {
	glog.Infof("waiting for apiserver healthz status ...")
	hStart := time.Now()

	healthz := func() (bool, error) {
		if time.Since(start) > timeout {
			return false, fmt.Errorf("cluster wait timed out during healthz check")
		}

		if time.Since(start) > minLogCheckTime {
			announceProblems(r, bs, cfg, cr)
			time.Sleep(kconst.APICallRetryInterval * 5)
		}

		status, err := apiServerHealthz(net.ParseIP(ip), port)
		if err != nil {
			glog.Warningf("status: %v", err)
			return false, nil
		}
		if status != state.Running {
			return false, nil
		}
		return true, nil
	}

	if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, healthz); err != nil {
		return fmt.Errorf("apiserver healthz never reported healthy")
	}

	vcheck := func() (bool, error) {
		if time.Since(start) > timeout {
			return false, fmt.Errorf("cluster wait timed out during version check")
		}
		if err := APIServerVersionMatch(client, cfg.KubernetesConfig.KubernetesVersion); err != nil {
			glog.Warningf("api server version match failed: %v", err)
			return false, nil
		}
		return true, nil
	}

	if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, vcheck); err != nil {
		return fmt.Errorf("controlPlane never updated to %s", cfg.KubernetesConfig.KubernetesVersion)
	}

	glog.Infof("duration metric: took %s to wait for apiserver health ...", time.Since(hStart))
	return nil
}

// APIServerVersionMatch checks if the server version matches the expected
func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error {
	vi, err := client.ServerVersion()
	if err != nil {
		return errors.Wrap(err, "server version")
	}
	glog.Infof("control plane version: %s", vi)
	if version.CompareKubeAwareVersionStrings(vi.String(), expected) != 0 {
		return fmt.Errorf("controlPlane = %q, expected: %q", vi.String(), expected)
	}
	return nil
}

// announceProblems checks for problems, and slows polling down if any are found
func announceProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner) {
	problems := logs.FindProblems(r, bs, cfg, cr)
@ -253,72 +143,6 @@ func announceProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg conf
	}
}

// APIServerStatus returns apiserver status in libmachine style state.State
func APIServerStatus(cr command.Runner, ip net.IP, port int) (state.State, error) {
	glog.Infof("Checking apiserver status ...")

	pid, err := apiServerPID(cr)
	if err != nil {
		glog.Warningf("stopped: unable to get apiserver pid: %v", err)
		return state.Stopped, nil
	}

	// Get the freezer cgroup entry for this pid
	rr, err := cr.RunCmd(exec.Command("sudo", "egrep", "^[0-9]+:freezer:", fmt.Sprintf("/proc/%d/cgroup", pid)))
	if err != nil {
		glog.Warningf("unable to find freezer cgroup: %v", err)
		return apiServerHealthz(ip, port)
	}
	freezer := strings.TrimSpace(rr.Stdout.String())
	glog.Infof("apiserver freezer: %q", freezer)
	fparts := strings.Split(freezer, ":")
	if len(fparts) != 3 {
		glog.Warningf("unable to parse freezer - found %d parts: %s", len(fparts), freezer)
		return apiServerHealthz(ip, port)
	}

	rr, err = cr.RunCmd(exec.Command("sudo", "cat", path.Join("/sys/fs/cgroup/freezer", fparts[2], "freezer.state")))
	if err != nil {
		glog.Errorf("unable to get freezer state: %s", rr.Stderr.String())
		return apiServerHealthz(ip, port)
	}

	fs := strings.TrimSpace(rr.Stdout.String())
	glog.Infof("freezer state: %q", fs)
	if fs == "FREEZING" || fs == "FROZEN" {
		return state.Paused, nil
	}
	return apiServerHealthz(ip, port)
}

// apiServerHealthz hits the /healthz endpoint and returns libmachine style state.State
func apiServerHealthz(ip net.IP, port int) (state.State, error) {
	url := fmt.Sprintf("https://%s/healthz", net.JoinHostPort(ip.String(), fmt.Sprint(port)))
	glog.Infof("Checking apiserver healthz at %s ...", url)
	// To avoid: x509: certificate signed by unknown authority
	tr := &http.Transport{
		Proxy:           nil, // To avoid connectivity issues if http(s)_proxy is set.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr}
	resp, err := client.Get(url)
	// Connection refused, usually.
	if err != nil {
		glog.Infof("stopped: %s: %v", url, err)
		return state.Stopped, nil
	}
	if resp.StatusCode == http.StatusUnauthorized {
		glog.Errorf("%s returned code %d (unauthorized). Please ensure that your apiserver authorization settings make sense!", url, resp.StatusCode)
		return state.Error, nil
	}
	if resp.StatusCode != http.StatusOK {
		glog.Warningf("%s response: %v %+v", url, err, resp)
		return state.Error, nil
	}
	return state.Running, nil
}

// KubeletStatus checks the kubelet status
func KubeletStatus(cr command.Runner) (state.State, error) {
	glog.Infof("Checking kubelet status ...")
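The removed variants above took a net.IP, while their replacements earlier in this diff accept a hostname string; net.JoinHostPort handles names, IPv4, and IPv6 alike, so the string signature subsumes the old one. A standalone, stdlib-only illustration (the host values are made up):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	fmt.Println(net.JoinHostPort("my-cluster.example.com", "8443")) // my-cluster.example.com:8443
	fmt.Println(net.JoinHostPort("192.168.39.100", "8443"))         // 192.168.39.100:8443
	fmt.Println(net.JoinHostPort("::1", "8443"))                    // [::1]:8443
}
```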
@ -0,0 +1,70 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package kverify verifies a running kubernetes cluster is healthy
package kverify

import (
	"fmt"
	"time"

	"github.com/golang/glog"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
	"k8s.io/minikube/pkg/minikube/bootstrapper"
	"k8s.io/minikube/pkg/minikube/command"
	"k8s.io/minikube/pkg/minikube/config"
	"k8s.io/minikube/pkg/minikube/cruntime"
)

// WaitForSystemPods verifies that the essential pods for a running kubernetes cluster are running
func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error {
	glog.Info("waiting for kube-system pods to appear ...")
	pStart := time.Now()

	podList := func() (bool, error) {
		if time.Since(start) > timeout {
			return false, fmt.Errorf("cluster wait timed out during pod check")
		}
		if time.Since(start) > minLogCheckTime {
			announceProblems(r, bs, cfg, cr)
			time.Sleep(kconst.APICallRetryInterval * 5)
		}

		// Wait for any system pod, as waiting for apiserver may block until etcd
		pods, err := client.CoreV1().Pods("kube-system").List(meta.ListOptions{})
		if err != nil {
			glog.Warningf("pod list returned error: %v", err)
			return false, nil
		}
		glog.Infof("%d kube-system pods found", len(pods.Items))
		for _, pod := range pods.Items {
			glog.Infof(podStatusMsg(pod))
		}

		if len(pods.Items) < 2 {
			return false, nil
		}
		return true, nil
	}
	if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, podList); err != nil {
		return fmt.Errorf("apiserver never returned a pod list")
	}
	glog.Infof("duration metric: took %s to wait for pod list to return data ...", time.Since(pStart))
	return nil
}
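The check above deliberately requires only that the apiserver can list at least two kube-system pods; pod readiness is left to other checks. A hedged caller sketch — every parameter is assumed to be wired up by the bootstrapper, the kverify import path is an assumption, and the timeout is an illustrative value:

```go
package example

import (
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/minikube/pkg/minikube/bootstrapper"
	// assumed import path for the kverify package shown above
	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
	"k8s.io/minikube/pkg/minikube/command"
	"k8s.io/minikube/pkg/minikube/config"
	"k8s.io/minikube/pkg/minikube/cruntime"
)

// waitCore blocks until kube-system reports at least two pods, or times out.
func waitCore(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset) error {
	return kverify.WaitForSystemPods(r, bs, cfg, cr, client, time.Now(), 6*time.Minute)
}
```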
@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -44,6 +44,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -37,6 +37,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -41,6 +41,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -44,6 +44,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -36,6 +36,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -37,6 +37,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"

@ -41,6 +41,8 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -51,6 +51,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -42,6 +42,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -48,6 +48,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -51,6 +51,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -42,6 +42,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -48,6 +48,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -51,6 +51,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -41,6 +41,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -42,6 +42,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"

@ -48,6 +48,7 @@ networking:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
@ -36,3 +36,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -36,3 +36,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: "192.168.32.0/20"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -36,3 +36,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -46,3 +46,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -36,3 +36,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -36,3 +36,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -36,3 +36,16 @@ networking:
  dnsDomain: 1.1.1.1
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -37,3 +37,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -43,3 +43,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -36,3 +36,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -36,3 +36,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: "192.168.32.0/20"
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -36,3 +36,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -46,3 +46,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -36,3 +36,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -36,3 +36,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -36,3 +36,16 @@ networking:
  dnsDomain: 1.1.1.1
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249

@ -37,3 +37,16 @@ networking:
  dnsDomain: cluster.local
  podSubnet: ""
  serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
  nodefs.available: "0%"
  nodefs.inodesFree: "0%"
  imagefs.available: "0%"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metricsBindAddress: 1.1.1.1:10249
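The long run of near-identical hunks above updates what appear to be golden-file expectations for the kubeadm config generator: every expected output now carries the KubeletConfiguration document (and, in the last group, a KubeProxyConfiguration with metricsBindAddress). A sketch of the kind of assertion such fixtures back — the function name is illustrative, not from this diff:

```go
package example

import "strings"

// hasExpectedDocs checks that generated kubeadm YAML carries the documents
// these fixtures expect; an illustrative check, not minikube's actual test.
func hasExpectedDocs(generated string) bool {
	return strings.Contains(generated, "kind: KubeletConfiguration") &&
		strings.Contains(generated, "imageGCHighThresholdPercent: 100")
}
```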
Some files were not shown because too many files have changed in this diff.