Merge branch 'master' of https://github.com/kubernetes/minikube into add-tracing

pull/9723/head
Priya Wadhwa 2020-11-24 10:29:06 -08:00
commit 63e5021065
106 changed files with 2150 additions and 511 deletions

View File

@ -1,5 +1,22 @@
# Release Notes
## Version 1.15.1 - 2020-11-16
Feature:
* Add Support for driver name alias [#9672](https://github.com/kubernetes/minikube/pull/9672)
Bug fix:
* Less verbose language selector [#9715](https://github.com/kubernetes/minikube/pull/9715)
Thank you to our contributors for this release!
- Ben Leggett
- Medya Ghazizadeh
- Priya Wadhwa
- Sadlil
- Sharif Elgamal
- Vasilyev, Viacheslav
## Version 1.15.0 - 2020-11-13
Features:

View File

@ -15,7 +15,7 @@
# Bump these on release - and please check ISO_VERSION for correctness.
VERSION_MAJOR ?= 1
VERSION_MINOR ?= 15
VERSION_BUILD ?= 0
VERSION_BUILD ?= 1
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
VERSION ?= v$(RAW_VERSION)
@ -23,7 +23,7 @@ KUBERNETES_VERSION ?= $(shell egrep "DefaultKubernetesVersion =" pkg/minikube/co
KIC_VERSION ?= $(shell egrep "Version =" pkg/drivers/kic/types.go | cut -d \" -f2)
# Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
ISO_VERSION ?= v1.15.0
ISO_VERSION ?= v1.15.2-snapshot
# Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
RPM_VERSION ?= $(DEB_VERSION)

View File

@ -130,7 +130,7 @@ func profileStatus(p *config.Profile, api libmachine.API) string {
func renderProfilesTable(ps [][]string) {
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Profile", "VM Driver", "Runtime", "IP", "Port", "Version", "Status"})
table.SetHeader([]string{"Profile", "VM Driver", "Runtime", "IP", "Port", "Version", "Status", "Nodes"})
table.SetAutoFormatHeaders(false)
table.SetBorders(tablewriter.Border{Left: true, Top: true, Right: true, Bottom: true})
table.SetCenterSeparator("|")
@ -146,7 +146,7 @@ func profilesToTableData(profiles []*config.Profile) [][]string {
exit.Error(reason.GuestCpConfig, "error getting primary control plane", err)
}
data = append(data, []string{p.Name, p.Config.Driver, p.Config.KubernetesConfig.ContainerRuntime, cp.IP, strconv.Itoa(cp.Port), p.Config.KubernetesConfig.KubernetesVersion, p.Status})
data = append(data, []string{p.Name, p.Config.Driver, p.Config.KubernetesConfig.ContainerRuntime, cp.IP, strconv.Itoa(cp.Port), p.Config.KubernetesConfig.KubernetesVersion, p.Status, strconv.Itoa(len(p.Config.Nodes))})
}
return data
}

View File

@ -27,9 +27,11 @@ import (
"os/exec"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/constants"
@ -43,7 +45,31 @@ import (
"k8s.io/minikube/pkg/minikube/sysinit"
)
var dockerEnvTmpl = fmt.Sprintf("{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerTLSVerify }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerHost }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerCertPath }}{{ .Suffix }}{{ .Prefix }}%s{{ .Delimiter }}{{ .MinikubeDockerdProfile }}{{ .Suffix }}{{ if .NoProxyVar }}{{ .Prefix }}{{ .NoProxyVar }}{{ .Delimiter }}{{ .NoProxyValue }}{{ .Suffix }}{{end}}{{ .UsageHint }}", constants.DockerTLSVerifyEnv, constants.DockerHostEnv, constants.DockerCertPathEnv, constants.MinikubeActiveDockerdEnv)
var dockerSetEnvTmpl = fmt.Sprintf(
"{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerTLSVerify }}{{ .Suffix }}"+
"{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerHost }}{{ .Suffix }}"+
"{{ .Prefix }}%s{{ .Delimiter }}{{ .DockerCertPath }}{{ .Suffix }}"+
"{{ if .ExistingDockerTLSVerify }}"+
"{{ .Prefix }}%s{{ .Delimiter }}{{ .ExistingDockerTLSVerify }}{{ .Suffix }}"+
"{{ end }}"+
"{{ if .ExistingDockerHost }}"+
"{{ .Prefix }}%s{{ .Delimiter }}{{ .ExistingDockerHost }}{{ .Suffix }}"+
"{{ end }}"+
"{{ if .ExistingDockerCertPath }}"+
"{{ .Prefix }}%s{{ .Delimiter }}{{ .ExistingDockerCertPath }}{{ .Suffix }}"+
"{{ end }}"+
"{{ .Prefix }}%s{{ .Delimiter }}{{ .MinikubeDockerdProfile }}{{ .Suffix }}"+
"{{ if .NoProxyVar }}"+
"{{ .Prefix }}{{ .NoProxyVar }}{{ .Delimiter }}{{ .NoProxyValue }}{{ .Suffix }}"+
"{{ end }}"+
"{{ .UsageHint }}",
constants.DockerTLSVerifyEnv,
constants.DockerHostEnv,
constants.DockerCertPathEnv,
constants.ExistingDockerTLSVerifyEnv,
constants.ExistingDockerHostEnv,
constants.ExistingDockerCertPathEnv,
constants.MinikubeActiveDockerdEnv)
// DockerShellConfig represents the shell config for Docker
type DockerShellConfig struct {
@ -54,6 +80,10 @@ type DockerShellConfig struct {
MinikubeDockerdProfile string
NoProxyVar string
NoProxyValue string
ExistingDockerCertPath string
ExistingDockerHost string
ExistingDockerTLSVerify string
}
var (
@ -81,6 +111,11 @@ func dockerShellCfgSet(ec DockerEnvConfig, envMap map[string]string) *DockerShel
s.DockerCertPath = envMap[constants.DockerCertPathEnv]
s.DockerHost = envMap[constants.DockerHostEnv]
s.DockerTLSVerify = envMap[constants.DockerTLSVerifyEnv]
s.ExistingDockerCertPath = envMap[constants.ExistingDockerCertPathEnv]
s.ExistingDockerHost = envMap[constants.ExistingDockerHostEnv]
s.ExistingDockerTLSVerify = envMap[constants.ExistingDockerTLSVerifyEnv]
s.MinikubeDockerdProfile = envMap[constants.MinikubeActiveDockerdEnv]
if ec.noProxy {
@ -117,14 +152,34 @@ func (EnvNoProxyGetter) GetNoProxyVar() (string, string) {
return noProxyVar, noProxyValue
}
// ensureDockerd guarantees that the dockerd service inside the minikube
// node is running before a docker-env command proceeds; if it is not
// active, it attempts a reload/restart via mustRestartDockerd.
func ensureDockerd(name string, r command.Runner) {
	if !isDockerActive(r) {
		mustRestartDockerd(name, r)
	}
}
// isDockerActive reports whether the "docker" service is currently
// active on the node reached through the given runner.
func isDockerActive(r command.Runner) bool {
	sd := sysinit.New(r)
	return sd.Active("docker")
}
func mustRestartDocker(name string, runner command.Runner) {
if err := sysinit.New(runner).Restart("docker"); err != nil {
exit.Message(reason.RuntimeRestart, `The Docker service within '{{.name}}' is not active`, out.V{"name": name})
// mustRestartDockerd will attempt to reload dockerd; if that fails it will
// try a full restart, and exits minikube if the restart fails as well.
func mustRestartDockerd(name string, runner command.Runner) {
	// Docker Docs: https://docs.docker.com/config/containers/live-restore
	// On Linux, you can avoid a restart (and avoid any downtime for your
	// containers) by reloading the Docker daemon, so try the cheaper
	// reload before falling back to a restart.
	klog.Warningf("dockerd is not active, will try to reload it...")
	if err := sysinit.New(runner).Reload("docker"); err != nil {
		klog.Warningf("will try to restart dockerd because reload failed: %v", err)
		if err := sysinit.New(runner).Restart("docker"); err != nil {
			exit.Message(reason.RuntimeRestart, `The Docker service within '{{.name}}' is not active`, out.V{"name": name})
		}
		// If we get to the point that we have to restart docker (instead of
		// reload), we need to wait for the apiserver container to come up;
		// this usually takes 5 seconds. Verifying the apiserver using kverify
		// would add code complexity for a rare case.
		klog.Warningf("waiting 5 seconds to ensure apiserver container is up...")
		time.Sleep(5 * time.Second)
	}
}
@ -134,8 +189,17 @@ var dockerEnvCmd = &cobra.Command{
Short: "Configure environment to use minikube's Docker daemon",
Long: `Sets up docker env variables; similar to '$(docker-machine env)'.`,
Run: func(cmd *cobra.Command, args []string) {
var err error
shl := shell.ForceShell
if shl == "" {
shl, err = shell.Detect()
if err != nil {
exit.Error(reason.InternalShellDetect, "Error detecting shell", err)
}
}
sh := shell.EnvConfig{
Shell: shell.ForceShell,
Shell: shl,
}
if dockerUnset {
@ -162,12 +226,8 @@ var dockerEnvCmd = &cobra.Command{
out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime})
}
if ok := isDockerActive(co.CP.Runner); !ok {
klog.Warningf("dockerd is not active will try to restart it...")
mustRestartDocker(cname, co.CP.Runner)
}
ensureDockerd(cname, co.CP.Runner)
var err error
port := constants.DockerDaemonPort
if driver.NeedsPortForward(driverName) {
port, err = oci.ForwardedPort(driverName, cname, port)
@ -186,13 +246,6 @@ var dockerEnvCmd = &cobra.Command{
noProxy: noProxy,
}
if ec.Shell == "" {
ec.Shell, err = shell.Detect()
if err != nil {
exit.Error(reason.InternalShellDetect, "Error detecting shell", err)
}
}
dockerPath, err := exec.LookPath("docker")
if err != nil {
klog.Warningf("Unable to find docker in path - skipping connectivity check: %v", err)
@ -203,8 +256,9 @@ var dockerEnvCmd = &cobra.Command{
out, err := tryDockerConnectivity("docker", ec)
if err != nil { // docker might be up but been loaded with wrong certs/config
// to fix issues like this #8185
klog.Warningf("couldn't connect to docker inside minikube. will try to restart dockerd service... output: %s error: %v", string(out), err)
mustRestartDocker(cname, co.CP.Runner)
// even though docker maybe running just fine it could be holding on to old certs and needs a refresh
klog.Warningf("couldn't connect to docker inside minikube. output: %s error: %v", string(out), err)
mustRestartDockerd(cname, co.CP.Runner)
}
}
@ -228,7 +282,7 @@ type DockerEnvConfig struct {
// dockerSetScript writes out a shell-compatible 'docker-env' script
func dockerSetScript(ec DockerEnvConfig, w io.Writer) error {
envVars := dockerEnvVars(ec)
return shell.SetScript(ec.EnvConfig, w, dockerEnvTmpl, dockerShellCfgSet(ec, envVars))
return shell.SetScript(ec.EnvConfig, w, dockerSetEnvTmpl, dockerShellCfgSet(ec, envVars))
}
// dockerSetScript writes out a shell-compatible 'docker-env unset' script
@ -246,7 +300,6 @@ func dockerUnsetScript(ec DockerEnvConfig, w io.Writer) error {
vars = append(vars, k)
}
}
return shell.UnsetScript(ec.EnvConfig, w, vars)
}
@ -257,14 +310,21 @@ func dockerURL(ip string, port int) string {
// dockerEnvVars gets the necessary docker env variables to allow the use of minikube's docker daemon
func dockerEnvVars(ec DockerEnvConfig) map[string]string {
env := map[string]string{
rt := map[string]string{
constants.DockerTLSVerifyEnv: "1",
constants.DockerHostEnv: dockerURL(ec.hostIP, ec.port),
constants.DockerCertPathEnv: ec.certsDir,
constants.MinikubeActiveDockerdEnv: ec.profile,
}
return env
if os.Getenv(constants.MinikubeActiveDockerdEnv) == "" {
for _, env := range constants.DockerDaemonEnvs {
if v := oci.InitialEnv(env); v != "" {
key := constants.MinikubeExistingPrefix + env
rt[key] = v
}
}
}
return rt
}
// dockerEnvVarsList gets the necessary docker env variables to allow the use of minikube's docker daemon to be used in a exec.Command

View File

@ -52,7 +52,10 @@ export MINIKUBE_ACTIVE_DOCKERD="dockerdriver"
# To point your shell to minikube's docker-daemon, run:
# eval $(minikube -p dockerdriver docker-env)
`,
`unset DOCKER_TLS_VERIFY DOCKER_HOST DOCKER_CERT_PATH MINIKUBE_ACTIVE_DOCKERD
`unset DOCKER_TLS_VERIFY;
unset DOCKER_HOST;
unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
`,
},
{
@ -67,7 +70,10 @@ export MINIKUBE_ACTIVE_DOCKERD="bash"
# To point your shell to minikube's docker-daemon, run:
# eval $(minikube -p bash docker-env)
`,
`unset DOCKER_TLS_VERIFY DOCKER_HOST DOCKER_CERT_PATH MINIKUBE_ACTIVE_DOCKERD
`unset DOCKER_TLS_VERIFY;
unset DOCKER_HOST;
unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
`,
},
{
@ -82,7 +88,10 @@ export MINIKUBE_ACTIVE_DOCKERD="ipv6"
# To point your shell to minikube's docker-daemon, run:
# eval $(minikube -p ipv6 docker-env)
`,
`unset DOCKER_TLS_VERIFY DOCKER_HOST DOCKER_CERT_PATH MINIKUBE_ACTIVE_DOCKERD
`unset DOCKER_TLS_VERIFY;
unset DOCKER_HOST;
unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
`,
},
{
@ -115,7 +124,10 @@ $Env:MINIKUBE_ACTIVE_DOCKERD = "powershell"
# & minikube -p powershell docker-env | Invoke-Expression
`,
`Remove-Item Env:\\DOCKER_TLS_VERIFY Env:\\DOCKER_HOST Env:\\DOCKER_CERT_PATH Env:\\MINIKUBE_ACTIVE_DOCKERD
`Remove-Item Env:\\DOCKER_TLS_VERIFY
Remove-Item Env:\\DOCKER_HOST
Remove-Item Env:\\DOCKER_CERT_PATH
Remove-Item Env:\\MINIKUBE_ACTIVE_DOCKERD
`,
},
{
@ -167,7 +179,11 @@ export NO_PROXY="127.0.0.1"
# eval $(minikube -p bash-no-proxy docker-env)
`,
`unset DOCKER_TLS_VERIFY DOCKER_HOST DOCKER_CERT_PATH MINIKUBE_ACTIVE_DOCKERD NO_PROXY
`unset DOCKER_TLS_VERIFY;
unset DOCKER_HOST;
unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
unset NO_PROXY;
`,
},
{
@ -184,7 +200,11 @@ export no_proxy="127.0.0.1"
# eval $(minikube -p bash-no-proxy-lower docker-env)
`,
`unset DOCKER_TLS_VERIFY DOCKER_HOST DOCKER_CERT_PATH MINIKUBE_ACTIVE_DOCKERD no_proxy
`unset DOCKER_TLS_VERIFY;
unset DOCKER_HOST;
unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
unset no_proxy;
`,
},
{
@ -200,7 +220,11 @@ $Env:no_proxy = "192.168.0.1"
# & minikube -p powershell-no-proxy-idempotent docker-env | Invoke-Expression
`,
`Remove-Item Env:\\DOCKER_TLS_VERIFY Env:\\DOCKER_HOST Env:\\DOCKER_CERT_PATH Env:\\MINIKUBE_ACTIVE_DOCKERD Env:\\no_proxy
`Remove-Item Env:\\DOCKER_TLS_VERIFY
Remove-Item Env:\\DOCKER_HOST
Remove-Item Env:\\DOCKER_CERT_PATH
Remove-Item Env:\\MINIKUBE_ACTIVE_DOCKERD
Remove-Item Env:\\no_proxy
`,
},
{
@ -217,7 +241,11 @@ export NO_PROXY="192.168.0.1,10.0.0.4,127.0.0.1"
# eval $(minikube -p sh-no-proxy-add docker-env)
`,
`unset DOCKER_TLS_VERIFY DOCKER_HOST DOCKER_CERT_PATH MINIKUBE_ACTIVE_DOCKERD NO_PROXY
`unset DOCKER_TLS_VERIFY;
unset DOCKER_HOST;
unset DOCKER_CERT_PATH;
unset MINIKUBE_ACTIVE_DOCKERD;
unset NO_PROXY;
`,
},
{
@ -229,7 +257,10 @@ DOCKER_HOST=tcp://127.0.0.1:32842
DOCKER_CERT_PATH=/certs
MINIKUBE_ACTIVE_DOCKERD=noneshell
`,
`DOCKER_TLS_VERIFY DOCKER_HOST DOCKER_CERT_PATH MINIKUBE_ACTIVE_DOCKERD
`DOCKER_TLS_VERIFY
DOCKER_HOST
DOCKER_CERT_PATH
MINIKUBE_ACTIVE_DOCKERD
`,
},
}

View File

@ -49,7 +49,8 @@ export MINIKUBE_ACTIVE_PODMAN="bash"
# To point your shell to minikube's podman service, run:
# eval $(minikube -p bash podman-env)
`,
`unset PODMAN_VARLINK_BRIDGE MINIKUBE_ACTIVE_PODMAN
`unset PODMAN_VARLINK_BRIDGE;
unset MINIKUBE_ACTIVE_PODMAN;
`,
},
{
@ -62,7 +63,9 @@ export MINIKUBE_ACTIVE_PODMAN="bash"
# To point your shell to minikube's podman service, run:
# eval $(minikube -p bash podman-env)
`,
`unset CONTAINER_HOST CONTAINER_SSHKEY MINIKUBE_ACTIVE_PODMAN
`unset CONTAINER_HOST;
unset CONTAINER_SSHKEY;
unset MINIKUBE_ACTIVE_PODMAN;
`,
},
}

View File

@ -248,17 +248,6 @@ func runStart(cmd *cobra.Command, args []string) {
})
}
}
if existing.KubernetesConfig.ContainerRuntime == "crio" {
// Stop and start again if it's crio because it's broken above v1.17.3
out.WarningT("Due to issues with CRI-O post v1.17.3, we need to restart your cluster.")
out.WarningT("See details at https://github.com/kubernetes/minikube/issues/8861")
stopProfile(existing.Name)
starter, err = provisionWithDriver(cmd, ds, existing)
if err != nil {
exitGuestProvision(err)
}
}
}
kubeconfig, err := startWithDriver(cmd, starter, existing)

View File

@ -99,12 +99,13 @@ func runStop(cmd *cobra.Command, args []string) {
schedule.KillExisting(profilesToStop)
if scheduledStopDuration != 0 {
if runtime.GOOS == "windows" {
exit.Message(reason.Usage, "the --schedule flag is currently not supported on windows")
}
if err := schedule.Daemonize(profilesToStop, scheduledStopDuration); err != nil {
exit.Message(reason.DaemonizeError, "unable to daemonize: {{.err}}", out.V{"err": err.Error()})
}
// if OS is windows, scheduled stop is now being handled within minikube, so return
if runtime.GOOS == "windows" {
return
}
klog.Infof("sleeping %s before completing stop...", scheduledStopDuration.String())
time.Sleep(scheduledStopDuration)
}

View File

@ -1,3 +1,17 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:

View File

@ -1,3 +1,17 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: v1
kind: Namespace
@ -166,7 +180,7 @@ spec:
containers:
- name: ambassador-operator
# Replace this with the built image name
image: quay.io/datawire/ambassador-operator:v1.2.3
image: {{default "quay.io/datawire" .ImageRepository}}/ambassador-operator:v1.2.3
command:
- ambassador-operator
imagePullPolicy: Always

View File

@ -1,9 +0,0 @@
apiVersion: getambassador.io/v2
kind: AmbassadorInstallation
metadata:
name: ambassador
namespace: ambassador
spec:
installOSS: true
helmValues:
deploymentTool: amb-oper-minikube

View File

@ -0,0 +1,23 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: getambassador.io/v2
kind: AmbassadorInstallation
metadata:
name: ambassador
namespace: ambassador
spec:
installOSS: true
helmValues:
deploymentTool: amb-oper-minikube

View File

@ -1,3 +1,17 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: Service
apiVersion: v1
metadata:
@ -43,7 +57,7 @@ spec:
serviceAccountName: csi-attacher
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v3.0.0-rc1
image: {{default "quay.io/k8scsi" .ImageRepository}}/csi-attacher:v3.0.0-rc1
args:
- --v=5
- --csi-address=/csi/csi.sock

View File

@ -1,13 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: hostpath.csi.k8s.io
namespace: kube-system
spec:
# Supports persistent and ephemeral inline volumes.
volumeLifecycleModes:
- Persistent
- Ephemeral
# To determine at runtime which mode a volume uses, pod info and its
# "csi.storage.k8s.io/ephemeral" entry are needed.
podInfoOnMount: true

View File

@ -0,0 +1,27 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: hostpath.csi.k8s.io
namespace: kube-system
spec:
# Supports persistent and ephemeral inline volumes.
volumeLifecycleModes:
- Persistent
- Ephemeral
# To determine at runtime which mode a volume uses, pod info and its
# "csi.storage.k8s.io/ephemeral" entry are needed.
podInfoOnMount: true

View File

@ -1,3 +1,17 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Service defined here, plus serviceName below in StatefulSet,
# are needed only because of condition explained in
# https://github.com/kubernetes/kubernetes/issues/69608
@ -39,7 +53,7 @@ spec:
spec:
containers:
- name: node-driver-registrar
image: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0
image: {{default "quay.io/k8scsi" .ImageRepository}}/csi-node-driver-registrar:v1.3.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -64,7 +78,7 @@ spec:
name: csi-data-dir
- name: hostpath
image: quay.io/k8scsi/hostpathplugin:v1.4.0-rc2
image: {{default "quay.io/k8scsi" .ImageRepository}}/hostpathplugin:v1.4.0-rc2
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
@ -109,7 +123,7 @@ spec:
volumeMounts:
- mountPath: /csi
name: socket-dir
image: quay.io/k8scsi/livenessprobe:v1.1.0
image: {{default "quay.io/k8scsi" .ImageRepository}}/livenessprobe:v1.1.0
args:
- --csi-address=/csi/csi.sock
- --health-port=9898

View File

@ -1,3 +1,17 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: Service
apiVersion: v1
metadata:
@ -43,7 +57,7 @@ spec:
serviceAccountName: csi-provisioner
containers:
- name: csi-provisioner
image: gcr.io/k8s-staging-sig-storage/csi-provisioner:v2.0.0-rc2
image: {{default "gcr.io/k8s-staging-sig-storage" .ImageRepository}}/csi-provisioner:v2.0.0-rc2
args:
- -v=5
- --csi-address=/csi/csi.sock

View File

@ -1,3 +1,17 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: Service
apiVersion: v1
metadata:
@ -43,7 +57,7 @@ spec:
serviceAccountName: csi-resizer
containers:
- name: csi-resizer
image: quay.io/k8scsi/csi-resizer:v0.6.0-rc1
image: {{default "quay.io/k8scsi" .ImageRepository}}/csi-resizer:v0.6.0-rc1
args:
- -v=5
- -csi-address=/csi/csi.sock

View File

@ -1,3 +1,17 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: Service
apiVersion: v1
metadata:
@ -43,7 +57,7 @@ spec:
serviceAccount: csi-snapshotter
containers:
- name: csi-snapshotter
image: quay.io/k8scsi/csi-snapshotter:v2.1.0
image: {{default "quay.io/k8scsi" .ImageRepository}}/csi-snapshotter:v2.1.0
args:
- -v=5
- --csi-address=/csi/csi.sock

View File

@ -1,7 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-hostpath-sc
provisioner: hostpath.csi.k8s.io #csi-hostpath
reclaimPolicy: Delete
volumeBindingMode: Immediate

View File

@ -0,0 +1,21 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-hostpath-sc
provisioner: hostpath.csi.k8s.io #csi-hostpath
reclaimPolicy: Delete
volumeBindingMode: Immediate

View File

@ -1,3 +1,17 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This YAML file contains all RBAC objects that are necessary to run external
# CSI attacher.
#

View File

@ -1,3 +1,17 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This YAML file contains all RBAC objects that are necessary to run external
# CSI provisioner.
#

View File

@ -1,3 +1,17 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This YAML file contains all RBAC objects that are necessary to run external
# CSI resizer.
#

View File

@ -1,3 +1,17 @@
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# RBAC file for the snapshot controller.
#
# The snapshot controller implements the control loop for CSI snapshot functionality.

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: gcp-auth
namespace: gcp-auth
spec:
ports:
- port: 443
targetPort: 8443
protocol: TCP
selector:
app: gcp-auth

View File

@ -0,0 +1,26 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Service
metadata:
name: gcp-auth
namespace: gcp-auth
spec:
ports:
- port: 443
targetPort: 8443
protocol: TCP
selector:
app: gcp-auth

View File

@ -1,3 +1,17 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: v1
kind: ServiceAccount

View File

@ -50,7 +50,7 @@ spec:
hostPath:
path: /
initContainers:
- image: {{default "k8s.gcr.io" .ImageRepository}}/minikube-nvidia-driver-installer
- image: {{default "k8s.gcr.io" .ImageRepository}}/minikube-nvidia-driver-installer:e2d9b43228decf5d6f7dce3f0a85d390f138fa01
name: nvidia-driver-installer
resources:
requests:

View File

@ -1,3 +1,17 @@
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
@ -10845,4 +10859,4 @@ spec:
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: string

View File

@ -1,3 +1,17 @@
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: v1
kind: Namespace
@ -334,4 +348,4 @@ spec:
sourceType: grpc
image: quay.io/operator-framework/upstream-community-operators:latest
displayName: Community Operators
publisher: OperatorHub.io
publisher: OperatorHub.io

View File

@ -106,7 +106,7 @@ spec:
serviceAccountName: glusterfile-provisioner
containers:
- name: glusterfile-provisioner
image: gluster/glusterfile-provisioner:latest
image: {{default "gluster" .ImageRepository}}/glusterfile-provisioner:latest
imagePullPolicy: Always
env:
- name: PROVISIONER_NAME

View File

@ -1,3 +1,17 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# RBAC file for the volume snapshot controller.
apiVersion: v1
kind: ServiceAccount

View File

@ -1,3 +1,17 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:

View File

@ -1,3 +1,17 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:

View File

@ -1,3 +1,17 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:

View File

@ -14,4 +14,5 @@ menu "System tools"
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/vbox-guest/Config.in"
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/containerd-bin/Config.in"
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/falco-module/Config.in"
source "$BR2_EXTERNAL_MINIKUBE_PATH/package/scheduled-stop/Config.in"
endmenu

View File

@ -0,0 +1,3 @@
config BR2_PACKAGE_SCHEDULED_STOP
bool "scheduled-stop"
default y

View File

@ -0,0 +1,11 @@
#!/bin/bash
# Scheduled-stop helper: wait $SLEEP seconds, then power the machine off.
# $SLEEP is supplied via the systemd unit's EnvironmentFile
# (/var/lib/minikube/scheduled-stop/environment).
set -x
echo "running scheduled stop ..."
# fix: message previously printed a stray '%' before the value ("%$SLEEP")
echo "sleeping $SLEEP seconds..."
sleep $SLEEP
echo "running poweroff..."
sudo systemctl poweroff

View File

@ -0,0 +1,11 @@
[Unit]
Description=minikube scheduled stop
[Install]
WantedBy=multi-user.target
[Service]
Type=simple
User=root
ExecStart=/usr/sbin/minikube-scheduled-stop
EnvironmentFile=/var/lib/minikube/scheduled-stop/environment

View File

@ -0,0 +1,23 @@
################################################################################
#
# minikube scheduled-stop
#
################################################################################
define SCHEDULED_STOP_INSTALL_INIT_SYSTEMD
$(INSTALL) -D -m 644 \
$(SCHEDULED_STOP_PKGDIR)/minikube-scheduled-stop.service \
$(TARGET_DIR)/usr/lib/systemd/system/minikube-scheduled-stop.service
mkdir -p $(TARGET_DIR)/etc/systemd/system/multi-user.target.wants
ln -fs /usr/lib/systemd/system/minikube-scheduled-stop.service \
$(TARGET_DIR)/etc/systemd/system/multi-user.target.wants/minikube-scheduled-stop.service
endef
define SCHEDULED_STOP_INSTALL_TARGET_CMDS
$(INSTALL) -Dm755 \
$(SCHEDULED_STOP_PKGDIR)/minikube-scheduled-stop \
$(TARGET_DIR)/usr/sbin/minikube-scheduled-stop
endef
$(eval $(generic-package))

View File

@ -130,6 +130,13 @@ COPY automount/minikube-automount.service /usr/lib/systemd/system/minikube-autom
RUN ln -fs /usr/lib/systemd/system/minikube-automount.service \
/etc/systemd/system/multi-user.target.wants/minikube-automount.service
# scheduled stop service
COPY scheduled-stop/minikube-scheduled-stop /var/lib/minikube/scheduled-stop/minikube-scheduled-stop
COPY scheduled-stop/minikube-scheduled-stop.service /usr/lib/systemd/system/minikube-scheduled-stop.service
RUN ln -fs /usr/lib/systemd/system/minikube-scheduled-stop.service \
/etc/systemd/system/multi-user.target.wants/minikube-scheduled-stop.service && \
chmod +x /var/lib/minikube/scheduled-stop/minikube-scheduled-stop
# disable non-docker runtimes by default
RUN systemctl disable containerd && systemctl disable crio && rm /etc/crictl.yaml
# enable docker which is default

View File

@ -0,0 +1,11 @@
#!/bin/bash
# Scheduled-stop helper: wait $SLEEP seconds, then power the machine off.
# $SLEEP is supplied via the systemd unit's EnvironmentFile
# (/var/lib/minikube/scheduled-stop/environment).
set -x
echo "running scheduled stop ..."
# fix: message previously printed a stray '%' before the value ("%$SLEEP")
echo "sleeping $SLEEP seconds..."
sleep $SLEEP
echo "running poweroff..."
sudo systemctl poweroff

View File

@ -0,0 +1,11 @@
[Unit]
Description=minikube scheduled stop
[Install]
WantedBy=multi-user.target
[Service]
Type=simple
User=root
ExecStart=/var/lib/minikube/scheduled-stop/minikube-scheduled-stop
EnvironmentFile=/var/lib/minikube/scheduled-stop/environment

View File

@ -1,4 +1,12 @@
[
{
"name": "v1.15.1",
"checksums": {
"darwin": "ab47a4e3ff742db8a88d7bf0fe9cb9c6805e6f1df2545d8888f196c46b96f714",
"linux": "88c3bfac3880e897e4d032801b02f02fdda642b75d76ebeb5df545cd90eee409",
"windows": "89e34d6137bba7a59b74e138af28746b883bb605cbf2d37c1ff29dce008050e8"
}
},
{
"name": "v1.15.0",
"checksums": {

View File

@ -30,8 +30,10 @@ export GOPATH="$HOME/go"
export KUBECONFIG="${TEST_HOME}/kubeconfig"
export PATH=$PATH:"/usr/local/bin/:/usr/local/go/bin/:$GOPATH/bin"
# install lsof for finding none driver procs, psmisc to use pstree in cronjobs
sudo apt-get -y install lsof psmisc
if [ "$(uname)" != "Darwin" ]; then
# install lsof for finding none driver procs, psmisc to use pstree in cronjobs
sudo apt-get -y install lsof psmisc
fi
# installing golang so we could do go get for gopogh
sudo ./installers/check_install_golang.sh "1.15.2" "/usr/local" || true

View File

@ -18,17 +18,14 @@ set -eux -o pipefail
if [[ "$OSTYPE" == "darwin"* ]]; then
echo "detected darwin, exiting"
return
exit 0
fi
echo "Installing latest docker"
sudo apt-get -y update
sudo apt-get -y install apt-transport-https ca-certificates curl gnupg-agent software-properties-common
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian `lsb_release -cs` stable"
sudo apt-get -y update
sudo apt-get -y install docker-ce docker-ce-cli containerd.io
sudo usermod -aG docker jenkins
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh
rm get-docker.sh
sudo adduser jenkins docker || true
echo "Installing latest kubectl"
curl -LO "https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl"

View File

@ -35,15 +35,15 @@ jobs=(
'HyperKit_macOS'
'Hyper-V_Windows'
'VirtualBox_Linux'
'VirtualBox_macOS'
# 'VirtualBox_macOS'
'VirtualBox_Windows'
# 'KVM-GPU_Linux' - Disabled
'KVM_Linux'
'none_Linux'
'Docker_Linux'
'Docker_macOS'
'Docker_Windows'
'Podman_Linux'
# 'Docker_macOS'
# 'Docker_Windows'
# 'Podman_Linux'
)
# retry_github_status provides reliable github status updates

View File

@ -17,37 +17,58 @@ limitations under the License.
package update
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"k8s.io/klog/v2"
)
// fsUpdate updates local filesystem repo files according to the given schema and data.
// Returns if the update actually changed anything, and any error occurred.
func fsUpdate(fsRoot string, schema map[string]Item, data interface{}) (changed bool, err error) {
var mode os.FileMode = 0644
for path, item := range schema {
path = filepath.Join(fsRoot, path)
blob, err := ioutil.ReadFile(path)
if err != nil {
return false, err
// if the item's content is already set, give it precedence over any current file content
var content []byte
if item.Content == nil {
info, err := os.Stat(path)
if err != nil {
return false, fmt.Errorf("unable to get file content: %w", err)
}
mode = info.Mode()
content, err = ioutil.ReadFile(path)
if err != nil {
return false, fmt.Errorf("unable to read file content: %w", err)
}
item.Content = content
}
info, err := os.Stat(path)
if err != nil {
return false, err
if err := item.apply(data); err != nil {
return false, fmt.Errorf("unable to update file: %w", err)
}
mode := info.Mode()
item.Content = blob
chg, err := item.apply(data)
if err != nil {
return false, err
}
if chg {
if !bytes.Equal(content, item.Content) {
// make sure path exists
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return false, fmt.Errorf("unable to create directory: %w", err)
}
if err := ioutil.WriteFile(path, item.Content, mode); err != nil {
return false, fmt.Errorf("unable to write file: %w", err)
}
changed = true
}
if err := ioutil.WriteFile(path, item.Content, mode); err != nil {
return false, err
}
}
return changed, nil
}
// Loadf returns the content of the file at path as a byte slice.
// On any read error it logs fatally via klog.Fatalf, which terminates the
// process; the trailing `return nil` only satisfies the compiler and is
// unreachable in practice.
func Loadf(path string) []byte {
	blob, err := ioutil.ReadFile(path)
	if err != nil {
		klog.Fatalf("Unable to load file %s: %v", path, err)
		return nil
	}
	return blob
}

View File

@ -73,7 +73,7 @@ func ghCreatePR(ctx context.Context, owner, repo, base, branch, title string, is
}
// update files
changes, err := ghUpdate(ctx, owner, repo, baseTree, token, schema, data)
changes, err := ghUpdate(ctx, owner, repo, token, schema, data)
if err != nil {
return nil, fmt.Errorf("unable to update files: %w", err)
}
@ -126,16 +126,16 @@ func ghCreatePR(ctx context.Context, owner, repo, base, branch, title string, is
klog.Infof("PR branch '%s' successfully created: %s", prBranch, prRef.GetURL())
// create PR
plan, err := GetPlan(schema, data)
_, pretty, err := GetPlan(schema, data)
if err != nil {
klog.Fatalf("Unable to parse schema: %v\n%s", err, plan)
klog.Fatalf("Unable to parse schema: %v\n%s", err, pretty)
}
modifiable := true
pr, _, err := ghc.PullRequests.Create(ctx, owner, repo, &github.NewPullRequest{
Title: github.String(title),
Head: github.String(*fork.Owner.Login + ":" + prBranch),
Base: github.String(base),
Body: github.String(fmt.Sprintf("fixes: #%d\n\nAutomatically created PR to update repo according to the Plan:\n\n```\n%s\n```", issue, plan)),
Body: github.String(fmt.Sprintf("fixes: #%d\n\nAutomatically created PR to update repo according to the Plan:\n\n```\n%s\n```", issue, pretty)),
MaintainerCanModify: &modifiable,
})
if err != nil {
@ -170,40 +170,40 @@ func ghFindPR(ctx context.Context, title, owner, repo, base, token string) (url
// ghUpdate updates remote GitHub owner/repo tree according to the given token, schema and data.
// Returns resulting changes, and any error occurred.
func ghUpdate(ctx context.Context, owner, repo string, tree *github.Tree, token string, schema map[string]Item, data interface{}) (changes []*github.TreeEntry, err error) {
func ghUpdate(ctx context.Context, owner, repo string, token string, schema map[string]Item, data interface{}) (changes []*github.TreeEntry, err error) {
ghc := ghClient(ctx, token)
// load each schema item content and update it creating new GitHub TreeEntries
cnt := len(schema) // expected number of files to change
for _, org := range tree.Entries {
if *org.Type == "blob" {
if item, match := schema[*org.Path]; match {
blob, _, err := ghc.Git.GetBlobRaw(ctx, owner, repo, *org.SHA)
if err != nil {
return nil, fmt.Errorf("unable to get file: %w", err)
}
item.Content = blob
changed, err := item.apply(data)
if err != nil {
return nil, fmt.Errorf("unable to update file: %w", err)
}
if changed {
// add github.TreeEntry that will replace original path content with the updated one
changes = append(changes, &github.TreeEntry{
Path: org.Path,
Mode: org.Mode,
Type: org.Type,
Content: github.String(string(item.Content)),
})
}
if cnt--; cnt == 0 {
break
}
for path, item := range schema {
// if the item's content is already set, give it precedence over any current file content
var content string
if item.Content == nil {
file, _, _, err := ghc.Repositories.GetContents(ctx, owner, repo, path, &github.RepositoryContentGetOptions{Ref: ghBase})
if err != nil {
return nil, fmt.Errorf("unable to get file content: %w", err)
}
content, err = file.GetContent()
if err != nil {
return nil, fmt.Errorf("unable to read file content: %w", err)
}
item.Content = []byte(content)
}
if err := item.apply(data); err != nil {
return nil, fmt.Errorf("unable to update file: %w", err)
}
if content != string(item.Content) {
// add github.TreeEntry that will replace original path content with the updated one or add new if one doesn't exist already
// ref: https://developer.github.com/v3/git/trees/#tree-object
rcPath := path // make sure to copy path variable as its reference (not value!) is passed to changes
rcMode := "100644"
rcType := "blob"
changes = append(changes, &github.TreeEntry{
Path: &rcPath,
Mode: &rcMode,
Type: &rcType,
Content: github.String(string(item.Content)),
})
}
}
if cnt != 0 {
return nil, fmt.Errorf("unable to find all the files (%d missing) - check the Plan: %w", cnt, err)
}
return changes, nil
}

View File

@ -39,7 +39,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1.0
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -39,7 +39,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1.0
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: "192.168.32.0/20"

View File

@ -39,7 +39,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1.0
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -45,7 +45,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1.0
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -39,7 +39,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1.0
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -39,7 +39,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1.0
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -39,7 +39,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1.0
kubernetesVersion: v1.19.0
networking:
dnsDomain: 1.1.1.1
podSubnet: "10.244.0.0/16"

View File

@ -40,7 +40,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1.0
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -42,7 +42,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1.0
kubernetesVersion: v1.19.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -30,6 +30,7 @@ import (
"context"
"time"
"golang.org/x/mod/semver"
"k8s.io/klog/v2"
"k8s.io/minikube/hack/update"
@ -54,6 +55,60 @@ var (
`'latest' for .*\)`: `'latest' for {{.LatestVersion}})`,
},
},
"pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/containerd-api-port.yaml": {
Content: update.Loadf("templates/v1beta2/containerd-api-port.yaml"),
Replace: map[string]string{
`kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`,
},
},
"pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/containerd-pod-network-cidr.yaml": {
Content: update.Loadf("templates/v1beta2/containerd-pod-network-cidr.yaml"),
Replace: map[string]string{
`kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`,
},
},
"pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/containerd.yaml": {
Content: update.Loadf("templates/v1beta2/containerd.yaml"),
Replace: map[string]string{
`kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`,
},
},
"pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/crio-options-gates.yaml": {
Content: update.Loadf("templates/v1beta2/crio-options-gates.yaml"),
Replace: map[string]string{
`kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`,
},
},
"pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/crio.yaml": {
Content: update.Loadf("templates/v1beta2/crio.yaml"),
Replace: map[string]string{
`kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`,
},
},
"pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/default.yaml": {
Content: update.Loadf("templates/v1beta2/default.yaml"),
Replace: map[string]string{
`kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`,
},
},
"pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/dns.yaml": {
Content: update.Loadf("templates/v1beta2/dns.yaml"),
Replace: map[string]string{
`kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`,
},
},
"pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/image-repository.yaml": {
Content: update.Loadf("templates/v1beta2/image-repository.yaml"),
Replace: map[string]string{
`kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`,
},
},
"pkg/minikube/bootstrapper/bsutil/testdata/{{.LatestVersionMM}}/options.yaml": {
Content: update.Loadf("templates/v1beta2/options.yaml"),
Replace: map[string]string{
`kubernetesVersion:.*`: `kubernetesVersion: {{.LatestVersionP0}}`,
},
},
}
// PR data
@ -64,8 +119,11 @@ var (
// Data holds greatest current stable release and greatest latest rc or beta pre-release Kubernetes versions
type Data struct {
StableVersion string `json:"StableVersion"`
LatestVersion string `json:"LatestVersion"`
StableVersion string `json:"StableVersion"`
LatestVersion string `json:"LatestVersion"`
LatestVersionMM string `json:"LatestVersionMM"` // LatestVersion in <major>.<minor> format
// for testdata: if StableVersion greater than 'LatestVersionMM.0' exists, LatestVersionP0 is 'LatestVersionMM.0', otherwise LatestVersionP0 is LatestVersion.
LatestVersionP0 string `json:"LatestVersionP0"`
}
func main() {
@ -74,12 +132,27 @@ func main() {
defer cancel()
// get Kubernetes versions from GitHub Releases
stable, latest, err := update.GHReleases(ctx, "kubernetes", "kubernetes")
if err != nil || stable == "" || latest == "" {
stable, latest, latestMM, latestP0, err := k8sVersions(ctx, "kubernetes", "kubernetes")
if err != nil || !semver.IsValid(stable) || !semver.IsValid(latest) || !semver.IsValid(latestMM) || !semver.IsValid(latestP0) {
klog.Fatalf("Unable to get Kubernetes versions: %v", err)
}
data := Data{StableVersion: stable, LatestVersion: latest}
data := Data{StableVersion: stable, LatestVersion: latest, LatestVersionMM: latestMM, LatestVersionP0: latestP0}
klog.Infof("Kubernetes versions: 'stable' is %s and 'latest' is %s", data.StableVersion, data.LatestVersion)
update.Apply(ctx, schema, data, prBranchPrefix, prTitle, prIssue)
}
// k8sVersions returns the Kubernetes versions relevant for the update:
// stable and latest (from GitHub Releases), latestMM (latest trimmed to
// <major>.<minor>), and latestP0 ('latestMM.0' if stable is already past it,
// otherwise latest itself — used for testdata file names/content).
func k8sVersions(ctx context.Context, owner, repo string) (stable, latest, latestMM, latestP0 string, err error) {
	// get Kubernetes versions from GitHub Releases
	stable, latest, err = update.GHReleases(ctx, owner, repo)
	if err != nil || !semver.IsValid(stable) || !semver.IsValid(latest) {
		return "", "", "", "", err
	}
	latestMM = semver.MajorMinor(latest)
	latestP0 = latestMM + ".0"
	if semver.Compare(stable, latestP0) == -1 {
		latestP0 = latest
	}
	return stable, latest, latestMM, latestP0, nil
}

View File

@ -84,33 +84,32 @@ type Item struct {
}
// apply updates Item Content by replacing all occurrences of Replace map's keys with their actual map values (with placeholders replaced with data).
func (i *Item) apply(data interface{}) (changed bool, err error) {
if i.Content == nil || i.Replace == nil {
return false, fmt.Errorf("unable to update content: nothing to update")
func (i *Item) apply(data interface{}) error {
if i.Content == nil {
return fmt.Errorf("unable to update content: nothing to update")
}
org := string(i.Content)
str := org
for src, dst := range i.Replace {
tmpl := template.Must(template.New("").Parse(dst))
buf := new(bytes.Buffer)
if err := tmpl.Execute(buf, data); err != nil {
return false, err
out, err := ParseTmpl(dst, data, "")
if err != nil {
return err
}
re := regexp.MustCompile(src)
str = re.ReplaceAllString(str, buf.String())
str = re.ReplaceAllString(str, out)
}
i.Content = []byte(str)
return str != org, nil
return nil
}
// Apply applies concrete update plan (schema + data) to GitHub or local filesystem repo
func Apply(ctx context.Context, schema map[string]Item, data interface{}, prBranchPrefix, prTitle string, prIssue int) {
plan, err := GetPlan(schema, data)
schema, pretty, err := GetPlan(schema, data)
if err != nil {
klog.Fatalf("Unable to parse schema: %v\n%s", err, plan)
klog.Fatalf("Unable to parse schema: %v\n%s", err, pretty)
}
klog.Infof("The Plan:\n%s", plan)
klog.Infof("The Plan:\n%s", pretty)
if target == "fs" || target == "all" {
changed, err := fsUpdate(FSRoot, schema, data)
@ -125,12 +124,9 @@ func Apply(ctx context.Context, schema map[string]Item, data interface{}, prBran
if target == "gh" || target == "all" {
// update prTitle replacing template placeholders with actual data values
tmpl := template.Must(template.New("prTitle").Parse(prTitle))
buf := new(bytes.Buffer)
if err := tmpl.Execute(buf, data); err != nil {
if prTitle, err = ParseTmpl(prTitle, data, "prTitle"); err != nil {
klog.Fatalf("Unable to parse PR Title: %v", err)
}
prTitle = buf.String()
// check if PR already exists
prURL, err := ghFindPR(ctx, prTitle, ghOwner, ghRepo, ghBase, ghToken)
@ -153,22 +149,31 @@ func Apply(ctx context.Context, schema map[string]Item, data interface{}, prBran
}
// GetPlan returns concrete plan replacing placeholders in schema with actual data values, returns JSON-formatted representation of the plan and any error occurred.
func GetPlan(schema map[string]Item, data interface{}) (prettyprint string, err error) {
for _, item := range schema {
func GetPlan(schema map[string]Item, data interface{}) (plan map[string]Item, prettyprint string, err error) {
plan = make(map[string]Item)
for p, item := range schema {
path, err := ParseTmpl(p, data, "")
if err != nil {
return plan, fmt.Sprintf("%+v", schema), err
}
plan[path] = item
}
for _, item := range plan {
for src, dst := range item.Replace {
tmpl := template.Must(template.New("").Parse(dst))
buf := new(bytes.Buffer)
if err := tmpl.Execute(buf, data); err != nil {
return fmt.Sprintf("%+v", schema), err
out, err := ParseTmpl(dst, data, "")
if err != nil {
return plan, fmt.Sprintf("%+v", schema), err
}
item.Replace[src] = buf.String()
item.Replace[src] = out
}
}
str, err := json.MarshalIndent(schema, "", " ")
str, err := json.MarshalIndent(plan, "", " ")
if err != nil {
return fmt.Sprintf("%+v", schema), err
return plan, fmt.Sprintf("%+v", schema), err
}
return string(str), nil
return plan, string(str), nil
}
// RunWithRetryNotify runs command cmd with stdin using exponential backoff for maxTime duration
@ -210,3 +215,13 @@ func Run(cmd *exec.Cmd, stdin io.Reader) error {
}
return nil
}
// ParseTmpl renders the Go text/template given in text against data and
// returns the result. name identifies the template in error messages and may
// be empty. Unlike the previous implementation (template.Must), a malformed
// template is reported as an error instead of panicking, matching the
// function's (string, error) contract and letting callers handle it.
func ParseTmpl(text string, data interface{}, name string) (string, error) {
	tmpl, err := template.New(name).Parse(text)
	if err != nil {
		return "", err
	}
	buf := new(bytes.Buffer)
	if err := tmpl.Execute(buf, data); err != nil {
		return "", err
	}
	return buf.String(), nil
}

View File

@ -30,6 +30,7 @@ import (
"github.com/docker/machine/libmachine/state"
"github.com/pkg/errors"
"k8s.io/klog/v2"
pkgdrivers "k8s.io/minikube/pkg/drivers"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/assets"
@ -93,22 +94,30 @@ func (d *Driver) Create() error {
klog.Infof("calculated static IP %q for the %q container", ip.String(), d.NodeConfig.MachineName)
params.IP = ip.String()
}
drv := d.DriverName()
listAddr := oci.DefaultBindIPV4
if oci.IsExternalDaemonHost(drv) {
out.WarningT("Listening to 0.0.0.0 on external docker host {{.host}}. Please be advised",
out.V{"host": oci.DaemonHost(drv)})
listAddr = "0.0.0.0"
}
// control plane specific options
params.PortMappings = append(params.PortMappings, oci.PortMapping{
ListenAddress: oci.DefaultBindIPV4,
ContainerPort: int32(params.APIServerPort),
},
params.PortMappings = append(params.PortMappings,
oci.PortMapping{
ListenAddress: oci.DefaultBindIPV4,
ListenAddress: listAddr,
ContainerPort: int32(params.APIServerPort),
},
oci.PortMapping{
ListenAddress: listAddr,
ContainerPort: constants.SSHPort,
},
oci.PortMapping{
ListenAddress: oci.DefaultBindIPV4,
ListenAddress: listAddr,
ContainerPort: constants.DockerDaemonPort,
},
oci.PortMapping{
ListenAddress: oci.DefaultBindIPV4,
ListenAddress: listAddr,
ContainerPort: constants.RegistryAddonPort,
},
)
@ -224,12 +233,12 @@ func (d *Driver) GetIP() (string, error) {
// GetExternalIP returns an IP which is accessible from outside
func (d *Driver) GetExternalIP() (string, error) {
return oci.DefaultBindIPV4, nil
return oci.DaemonHost(d.DriverName()), nil
}
// GetSSHHostname returns hostname for use with ssh
func (d *Driver) GetSSHHostname() (string, error) {
return oci.DefaultBindIPV4, nil
return oci.DaemonHost(d.DriverName()), nil
}
// GetSSHPort returns port for use with ssh

View File

@ -27,6 +27,7 @@ import (
"time"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/style"
)

View File

@ -0,0 +1,48 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oci
import (
"os"
"k8s.io/minikube/pkg/minikube/constants"
)
// initialEnvs records the values of the docker-daemon-related environment
// variables as they were at process start, before minikube mutates them.
var initialEnvs = make(map[string]string)

// init snapshots each variable in constants.DockerDaemonEnvs, and its
// constants.MinikubeExistingPrefix-prefixed counterpart, if set — so the
// original shell environment can be consulted/restored later.
func init() {
	for _, env := range constants.DockerDaemonEnvs {
		if v, set := os.LookupEnv(env); set {
			initialEnvs[env] = v
		}
		exEnv := constants.MinikubeExistingPrefix + env
		if v, set := os.LookupEnv(exEnv); set {
			initialEnvs[exEnv] = v
		}
	}
}

// InitialEnv returns the value of the environment variable env before any environment changes made by minikube.
// Returns "" if env was unset at process start.
func InitialEnv(env string) string {
	return initialEnvs[env]
}

// LookupInitialEnv returns the value of the environment variable env before any environment changes made by minikube,
// and whether it was set at process start.
func LookupInitialEnv(env string) (string, bool) {
	v, set := initialEnvs[env]
	return v, set
}

View File

@ -164,6 +164,5 @@ func dockerContainerIP(name string) (string, string, error) {
if len(ips) != 2 {
return "", "", errors.Errorf("container addresses should have 2 values, got %d values: %+v", len(ips), ips)
}
return ips[0], ips[1], nil
}

View File

@ -19,10 +19,10 @@ package oci
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"net"
"os/exec"
"strconv"
"strings"
"github.com/pkg/errors"
@ -135,10 +135,22 @@ type netInfo struct {
mtu int
}
// networkInspect is only used to unmarshal the docker network inspect output and translate it to netInfo
type networkInspect struct {
Name string
Driver string
Subnet string
Gateway string
MTU int
ContainerIPs []string
}
// if exists returns subnet, gateway and mtu
func dockerNetworkInspect(name string) (netInfo, error) {
var vals networkInspect
var info = netInfo{name: name}
cmd := exec.Command(Docker, "network", "inspect", name, "--format", `{{(index .IPAM.Config 0).Subnet}},{{(index .IPAM.Config 0).Gateway}},{{(index .Options "com.docker.network.driver.mtu")}}`)
cmd := exec.Command(Docker, "network", "inspect", name, "--format", `{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{(index .Options "com.docker.network.driver.mtu")}},{{$first := true}} "ContainerIPs": [{{range $k,$v := .Containers }}{{if $first}}{{$first = false}}{{else}}, {{end}}"{{$v.IPv4Address}}"{{end}}]}`)
rr, err := runCmd(cmd)
if err != nil {
logDockerNetworkInspect(name)
@ -149,23 +161,15 @@ func dockerNetworkInspect(name string) (netInfo, error) {
return info, err
}
// results looks like 172.17.0.0/16,172.17.0.1,1500
vals := strings.Split(strings.TrimSpace(rr.Stdout.String()), ",")
if len(vals) == 0 {
return info, fmt.Errorf("empty list network inspect: %q", rr.Output())
// results looks like {"Name": "bridge","Driver": "bridge","Subnet": "172.17.0.0/16","Gateway": "172.17.0.1","MTU": 1500, "ContainerIPs": ["172.17.0.3/16", "172.17.0.2/16"]}
if err := json.Unmarshal(rr.Stdout.Bytes(), &vals); err != nil {
return info, fmt.Errorf("error parsing network inspect output: %q", rr.Stdout.String())
}
if len(vals) > 0 {
info.gateway = net.ParseIP(vals[1])
mtu, err := strconv.Atoi(vals[2])
if err != nil {
klog.Warningf("couldn't parse mtu for docker network %q: %v", name, err)
} else {
info.mtu = mtu
}
}
info.gateway = net.ParseIP(vals.Gateway)
info.mtu = vals.MTU
_, info.subnet, err = net.ParseCIDR(vals[0])
_, info.subnet, err = net.ParseCIDR(vals.Subnet)
if err != nil {
return info, errors.Wrapf(err, "parse subnet for %s", name)
}

View File

@ -17,26 +17,26 @@ limitations under the License.
package oci
import (
"context"
"os"
"time"
"bufio"
"bytes"
"context"
"fmt"
"net/url"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"time"
"github.com/docker/machine/libmachine/state"
"github.com/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/util/retry"
"fmt"
"os/exec"
"runtime"
"strconv"
"strings"
)
// DeleteContainersByLabel deletes all containers that have a specific label
@ -521,18 +521,29 @@ func ListContainersByLabel(ociBin string, label string, warnSlow ...bool) ([]str
// PointToHostDockerDaemon will unset env variables that point to docker inside minikube
// to make sure it points to the docker daemon installed by user.
func PointToHostDockerDaemon() error {
p := os.Getenv(constants.MinikubeActiveDockerdEnv)
if p != "" {
if p := os.Getenv(constants.MinikubeActiveDockerdEnv); p != "" {
klog.Infof("shell is pointing to dockerd inside minikube. will unset to use host")
for _, e := range constants.DockerDaemonEnvs {
if err := resetEnv(e); err != nil {
return err
}
}
}
for i := range constants.DockerDaemonEnvs {
e := constants.DockerDaemonEnvs[i]
err := os.Setenv(e, "")
if err != nil {
return errors.Wrapf(err, "resetting %s env", e)
}
return nil
}
// resetEnv restores the environment variable key from its saved
// MINIKUBE_EXISTING_-prefixed counterpart, or unsets it when no saved
// value exists. Returns a wrapped error if the env operation fails.
func resetEnv(key string) error {
	saved := os.Getenv(constants.MinikubeExistingPrefix + key)
	if saved != "" {
		// A previous value was stashed away; put it back.
		if err := os.Setenv(key, saved); err != nil {
			return errors.Wrapf(err, "resetting %s env", key)
		}
		return nil
	}
	// Nothing was saved: make sure the variable is cleared entirely.
	if err := os.Unsetenv(key); err != nil {
		return errors.Wrapf(err, "resetting %s env", key)
	}
	return nil
}
@ -628,3 +639,36 @@ func iptablesFileExists(ociBin string, nameOrID string) bool {
}
return true
}
// DaemonHost returns the ip/hostname where OCI daemon service for driver is running
// For Podman it's always DefaultBindIPV4
// For Docker return the host part of DOCKER_HOST environment variable if set
// or DefaultBindIPV4 otherwise
func DaemonHost(driver string) string {
	if driver != Docker {
		return DefaultBindIPV4
	}
	dh := os.Getenv(constants.DockerHostEnv)
	if dh == "" {
		return DefaultBindIPV4
	}
	u, err := url.Parse(dh)
	if err != nil || u.Host == "" {
		// Unparseable or host-less URI (e.g. unix socket): daemon is local.
		return DefaultBindIPV4
	}
	return u.Hostname()
}
// IsExternalDaemonHost returns whether or not the OCI runtime is running on an external/virtual host
// For Podman driver it's always false for now
// For Docker driver return true if DOCKER_HOST is set to a URI, and the URI contains a host item
func IsExternalDaemonHost(driver string) bool {
	if driver != Docker {
		return false
	}
	dh := os.Getenv(constants.DockerHostEnv)
	if dh == "" {
		return false
	}
	u, err := url.Parse(dh)
	return err == nil && u.Host != ""
}

View File

@ -0,0 +1,97 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oci
import (
"os"
"testing"
)
// TestPointToHostDockerDaemonEmpty verifies that when no MINIKUBE_EXISTING_*
// values are saved, pointing back to the host daemon unsets all DOCKER_* vars.
func TestPointToHostDockerDaemonEmpty(t *testing.T) {
	for key, value := range map[string]string{
		"DOCKER_HOST":             "foo_host",
		"DOCKER_CERT_PATH":        "foo_cert_path",
		"DOCKER_TLS_VERIFY":       "foo_tls_verify",
		"MINIKUBE_ACTIVE_DOCKERD": "minikube",
	} {
		_ = os.Setenv(key, value)
	}
	for _, key := range []string{
		"MINIKUBE_EXISTING_DOCKER_HOST",
		"MINIKUBE_EXISTING_DOCKER_CERT_PATH",
		"MINIKUBE_EXISTING_DOCKER_TLS_VERIFY",
	} {
		_ = os.Unsetenv(key)
	}

	if err := PointToHostDockerDaemon(); err != nil {
		t.Fatalf("failed to set docker environment: got %v", err)
	}

	// With nothing saved, every DOCKER_* variable must end up unset.
	for _, key := range []string{"DOCKER_HOST", "DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY"} {
		if v, set := os.LookupEnv(key); set {
			t.Errorf("%v env variable should not be set. got: %v", key, v)
		}
	}
}
// TestPointToHostDockerDaemon verifies that each DOCKER_* variable is restored
// from its MINIKUBE_EXISTING_* counterpart when those saved values are present.
func TestPointToHostDockerDaemon(t *testing.T) {
	_ = os.Setenv("DOCKER_HOST", "foo_host")
	_ = os.Setenv("DOCKER_CERT_PATH", "foo_cert_path")
	_ = os.Setenv("DOCKER_TLS_VERIFY", "foo_tls_verify")

	_ = os.Setenv("MINIKUBE_EXISTING_DOCKER_HOST", "bar_host")
	_ = os.Setenv("MINIKUBE_EXISTING_DOCKER_CERT_PATH", "bar_cert_path")
	_ = os.Setenv("MINIKUBE_EXISTING_DOCKER_TLS_VERIFY", "bar_tls_verify")

	if err := PointToHostDockerDaemon(); err != nil {
		t.Fatalf("failed to set docker environment: got %v", err)
	}

	expected := []struct {
		key, value string
	}{
		{"DOCKER_HOST", "bar_host"},
		{"DOCKER_CERT_PATH", "bar_cert_path"},
		{"DOCKER_TLS_VERIFY", "bar_tls_verify"},
	}
	for _, exp := range expected {
		if v := os.Getenv(exp.key); v != exp.value {
			// Bug fix: report the variable name (exp.key) in the message,
			// not its expected value, which was passed twice before.
			t.Errorf("invalid %v env variable. got: %v, want: %v", exp.key, v, exp.value)
		}
	}
}
// TestDaemonHost exercises DaemonHost and IsExternalDaemonHost across a
// matrix of drivers and DOCKER_HOST values.
func TestDaemonHost(t *testing.T) {
	tests := []struct {
		driver           string
		dockerHost       string
		expectedAddr     string
		expectedExternal bool
	}{
		{"", "", "127.0.0.1", false},
		{"docker", "tcp://1.1.1.1:2222/foo", "1.1.1.1", true},
		{"podman", "tcp://1.1.1.1:2222/foo", "127.0.0.1", false},
		{"_invalid_", "tcp://1.1.1.1:2222/foo", "127.0.0.1", false},
		{"docker", "unix:///var/run/something", "127.0.0.1", false},
		{"docker", "tcp://127.0.0.1/foo", "127.0.0.1", true},
	}
	for _, tc := range tests {
		_ = os.Setenv("DOCKER_HOST", tc.dockerHost)
		if external := IsExternalDaemonHost(tc.driver); external != tc.expectedExternal {
			t.Errorf("invalid result of IsExternalDaemonHost. got: %v, want: %v", external, tc.expectedExternal)
		}
		if addr := DaemonHost(tc.driver); addr != tc.expectedAddr {
			t.Errorf("invalid oci daemon host. got: %v, want: %v", addr, tc.expectedAddr)
		}
	}
}

View File

@ -24,9 +24,9 @@ import (
const (
// Version is the current version of kic
Version = "v0.0.14"
Version = "v0.0.15-snapshot"
// SHA of the kic base image
baseImageSHA = "2bd97b482faf5b6a403ac39dd5e7c6fe2006425c6663a12f94f64f5f81a7787e"
baseImageSHA = "8aba7f18c2de2d5fa32da5a03653814482682311fa35f146b01d8c569fb73dc5"
)
var (

View File

@ -63,482 +63,413 @@ func (a *Addon) IsEnabled(cc *config.ClusterConfig) bool {
var Addons = map[string]*Addon{
"dashboard": NewAddon([]*BinAsset{
// We want to create the kubernetes-dashboard ns first so that every subsequent object can be created
MustBinAsset("deploy/addons/dashboard/dashboard-ns.yaml", vmpath.GuestAddonsDir, "dashboard-ns.yaml", "0640", false),
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrole.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrole.yaml", "0640", false),
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrolebinding.yaml", "0640", false),
MustBinAsset("deploy/addons/dashboard/dashboard-configmap.yaml", vmpath.GuestAddonsDir, "dashboard-configmap.yaml", "0640", false),
MustBinAsset("deploy/addons/dashboard/dashboard-dp.yaml.tmpl", vmpath.GuestAddonsDir, "dashboard-dp.yaml", "0640", true),
MustBinAsset("deploy/addons/dashboard/dashboard-role.yaml", vmpath.GuestAddonsDir, "dashboard-role.yaml", "0640", false),
MustBinAsset("deploy/addons/dashboard/dashboard-rolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-rolebinding.yaml", "0640", false),
MustBinAsset("deploy/addons/dashboard/dashboard-sa.yaml", vmpath.GuestAddonsDir, "dashboard-sa.yaml", "0640", false),
MustBinAsset("deploy/addons/dashboard/dashboard-secret.yaml", vmpath.GuestAddonsDir, "dashboard-secret.yaml", "0640", false),
MustBinAsset("deploy/addons/dashboard/dashboard-svc.yaml", vmpath.GuestAddonsDir, "dashboard-svc.yaml", "0640", false),
MustBinAsset("deploy/addons/dashboard/dashboard-ns.yaml", vmpath.GuestAddonsDir, "dashboard-ns.yaml", "0640"),
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrole.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrole.yaml", "0640"),
MustBinAsset("deploy/addons/dashboard/dashboard-clusterrolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-clusterrolebinding.yaml", "0640"),
MustBinAsset("deploy/addons/dashboard/dashboard-configmap.yaml", vmpath.GuestAddonsDir, "dashboard-configmap.yaml", "0640"),
MustBinAsset("deploy/addons/dashboard/dashboard-dp.yaml.tmpl", vmpath.GuestAddonsDir, "dashboard-dp.yaml", "0640"),
MustBinAsset("deploy/addons/dashboard/dashboard-role.yaml", vmpath.GuestAddonsDir, "dashboard-role.yaml", "0640"),
MustBinAsset("deploy/addons/dashboard/dashboard-rolebinding.yaml", vmpath.GuestAddonsDir, "dashboard-rolebinding.yaml", "0640"),
MustBinAsset("deploy/addons/dashboard/dashboard-sa.yaml", vmpath.GuestAddonsDir, "dashboard-sa.yaml", "0640"),
MustBinAsset("deploy/addons/dashboard/dashboard-secret.yaml", vmpath.GuestAddonsDir, "dashboard-secret.yaml", "0640"),
MustBinAsset("deploy/addons/dashboard/dashboard-svc.yaml", vmpath.GuestAddonsDir, "dashboard-svc.yaml", "0640"),
}, false, "dashboard"),
"default-storageclass": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/storageclass/storageclass.yaml.tmpl",
vmpath.GuestAddonsDir,
"storageclass.yaml",
"0640",
false),
"0640"),
}, true, "default-storageclass"),
"pod-security-policy": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/pod-security-policy/pod-security-policy.yaml.tmpl",
vmpath.GuestAddonsDir,
"pod-security-policy.yaml",
"0640",
false),
"0640"),
}, false, "pod-security-policy"),
"storage-provisioner": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/storage-provisioner/storage-provisioner.yaml.tmpl",
vmpath.GuestAddonsDir,
"storage-provisioner.yaml",
"0640",
true),
"0640"),
}, true, "storage-provisioner"),
"storage-provisioner-gluster": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/storage-provisioner-gluster/storage-gluster-ns.yaml.tmpl",
vmpath.GuestAddonsDir,
"storage-gluster-ns.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/storage-provisioner-gluster/glusterfs-daemonset.yaml.tmpl",
vmpath.GuestAddonsDir,
"glusterfs-daemonset.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/storage-provisioner-gluster/heketi-deployment.yaml.tmpl",
vmpath.GuestAddonsDir,
"heketi-deployment.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/storage-provisioner-gluster/storage-provisioner-glusterfile.yaml.tmpl",
vmpath.GuestAddonsDir,
"storage-privisioner-glusterfile.yaml",
"0640",
false),
"0640"),
}, false, "storage-provisioner-gluster"),
"efk": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/efk/elasticsearch-rc.yaml.tmpl",
vmpath.GuestAddonsDir,
"elasticsearch-rc.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/efk/elasticsearch-svc.yaml.tmpl",
vmpath.GuestAddonsDir,
"elasticsearch-svc.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/efk/fluentd-es-rc.yaml.tmpl",
vmpath.GuestAddonsDir,
"fluentd-es-rc.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/efk/fluentd-es-configmap.yaml.tmpl",
vmpath.GuestAddonsDir,
"fluentd-es-configmap.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/efk/kibana-rc.yaml.tmpl",
vmpath.GuestAddonsDir,
"kibana-rc.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/efk/kibana-svc.yaml.tmpl",
vmpath.GuestAddonsDir,
"kibana-svc.yaml",
"0640",
false),
"0640"),
}, false, "efk"),
"ingress": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/ingress/ingress-configmap.yaml.tmpl",
vmpath.GuestAddonsDir,
"ingress-configmap.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/ingress/ingress-rbac.yaml.tmpl",
vmpath.GuestAddonsDir,
"ingress-rbac.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/ingress/ingress-dp.yaml.tmpl",
vmpath.GuestAddonsDir,
"ingress-dp.yaml",
"0640",
true),
"0640"),
}, false, "ingress"),
"istio-provisioner": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/istio-provisioner/istio-operator.yaml.tmpl",
vmpath.GuestAddonsDir,
"istio-operator.yaml",
"0640",
true),
"0640"),
}, false, "istio-provisioner"),
"istio": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/istio/istio-default-profile.yaml.tmpl",
vmpath.GuestAddonsDir,
"istio-default-profile.yaml",
"0640",
false),
"0640"),
}, false, "istio"),
"kubevirt": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/kubevirt/pod.yaml.tmpl",
vmpath.GuestAddonsDir,
"pod.yaml",
"0640",
false),
"0640"),
}, false, "kubevirt"),
"metrics-server": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/metrics-server/metrics-apiservice.yaml.tmpl",
vmpath.GuestAddonsDir,
"metrics-apiservice.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/metrics-server/metrics-server-deployment.yaml.tmpl",
vmpath.GuestAddonsDir,
"metrics-server-deployment.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/metrics-server/metrics-server-service.yaml.tmpl",
vmpath.GuestAddonsDir,
"metrics-server-service.yaml",
"0640",
false),
"0640"),
}, false, "metrics-server"),
"olm": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/olm/crds.yaml",
"deploy/addons/olm/crds.yaml.tmpl",
vmpath.GuestAddonsDir,
"crds.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/olm/olm.yaml",
"deploy/addons/olm/olm.yaml.tmpl",
vmpath.GuestAddonsDir,
"olm.yaml",
"0640",
false),
"0640"),
}, false, "olm"),
"registry": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/registry/registry-rc.yaml.tmpl",
vmpath.GuestAddonsDir,
"registry-rc.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/registry/registry-svc.yaml.tmpl",
vmpath.GuestAddonsDir,
"registry-svc.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/registry/registry-proxy.yaml.tmpl",
vmpath.GuestAddonsDir,
"registry-proxy.yaml",
"0640",
true),
"0640"),
}, false, "registry"),
"registry-creds": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/registry-creds/registry-creds-rc.yaml.tmpl",
vmpath.GuestAddonsDir,
"registry-creds-rc.yaml",
"0640",
false),
"0640"),
}, false, "registry-creds"),
"registry-aliases": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/registry-aliases/registry-aliases-sa.tmpl",
vmpath.GuestAddonsDir,
"registry-aliases-sa.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/registry-aliases/registry-aliases-sa-crb.tmpl",
vmpath.GuestAddonsDir,
"registry-aliases-sa-crb.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/registry-aliases/registry-aliases-config.tmpl",
vmpath.GuestAddonsDir,
"registry-aliases-config.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/registry-aliases/node-etc-hosts-update.tmpl",
vmpath.GuestAddonsDir,
"node-etc-hosts-update.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/registry-aliases/patch-coredns-job.tmpl",
vmpath.GuestAddonsDir,
"patch-coredns-job.yaml",
"0640",
true),
"0640"),
}, false, "registry-aliases"),
"freshpod": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/freshpod/freshpod-rc.yaml.tmpl",
vmpath.GuestAddonsDir,
"freshpod-rc.yaml",
"0640",
true),
"0640"),
}, false, "freshpod"),
"nvidia-driver-installer": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/gpu/nvidia-driver-installer.yaml.tmpl",
vmpath.GuestAddonsDir,
"nvidia-driver-installer.yaml",
"0640",
true),
"0640"),
}, false, "nvidia-driver-installer"),
"nvidia-gpu-device-plugin": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl",
vmpath.GuestAddonsDir,
"nvidia-gpu-device-plugin.yaml",
"0640",
true),
"0640"),
}, false, "nvidia-gpu-device-plugin"),
"logviewer": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/logviewer/logviewer-dp-and-svc.yaml.tmpl",
vmpath.GuestAddonsDir,
"logviewer-dp-and-svc.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/logviewer/logviewer-rbac.yaml.tmpl",
vmpath.GuestAddonsDir,
"logviewer-rbac.yaml",
"0640",
false),
"0640"),
}, false, "logviewer"),
"gvisor": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/gvisor/gvisor-pod.yaml.tmpl",
vmpath.GuestAddonsDir,
"gvisor-pod.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/gvisor/gvisor-runtimeclass.yaml",
"deploy/addons/gvisor/gvisor-runtimeclass.yaml.tmpl",
vmpath.GuestAddonsDir,
"gvisor-runtimeclass.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/gvisor/gvisor-config.toml",
vmpath.GuestGvisorDir,
constants.GvisorConfigTomlTargetName,
"0640",
true),
"0640"),
}, false, "gvisor"),
"helm-tiller": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/helm-tiller/helm-tiller-dp.tmpl",
vmpath.GuestAddonsDir,
"helm-tiller-dp.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/helm-tiller/helm-tiller-rbac.tmpl",
vmpath.GuestAddonsDir,
"helm-tiller-rbac.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/helm-tiller/helm-tiller-svc.tmpl",
vmpath.GuestAddonsDir,
"helm-tiller-svc.yaml",
"0640",
true),
"0640"),
}, false, "helm-tiller"),
"ingress-dns": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/ingress-dns/ingress-dns-pod.yaml.tmpl",
vmpath.GuestAddonsDir,
"ingress-dns-pod.yaml",
"0640",
true),
"0640"),
}, false, "ingress-dns"),
"metallb": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/metallb/metallb.yaml.tmpl",
vmpath.GuestAddonsDir,
"metallb.yaml",
"0640",
true),
"0640"),
MustBinAsset(
"deploy/addons/metallb/metallb-config.yaml.tmpl",
vmpath.GuestAddonsDir,
"metallb-config.yaml",
"0640",
true),
"0640"),
}, false, "metallb"),
"ambassador": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/ambassador/ambassador-operator-crds.yaml",
"deploy/addons/ambassador/ambassador-operator-crds.yaml.tmpl",
vmpath.GuestAddonsDir,
"ambassador-operator-crds.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/ambassador/ambassador-operator.yaml",
"deploy/addons/ambassador/ambassador-operator.yaml.tmpl",
vmpath.GuestAddonsDir,
"ambassador-operator.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/ambassador/ambassadorinstallation.yaml",
"deploy/addons/ambassador/ambassadorinstallation.yaml.tmpl",
vmpath.GuestAddonsDir,
"ambassadorinstallation.yaml",
"0640",
false),
"0640"),
}, false, "ambassador"),
"gcp-auth": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/gcp-auth/gcp-auth-ns.yaml",
"deploy/addons/gcp-auth/gcp-auth-ns.yaml.tmpl",
vmpath.GuestAddonsDir,
"gcp-auth-ns.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/gcp-auth/gcp-auth-service.yaml",
"deploy/addons/gcp-auth/gcp-auth-service.yaml.tmpl",
vmpath.GuestAddonsDir,
"gcp-auth-service.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/gcp-auth/gcp-auth-webhook.yaml.tmpl",
"deploy/addons/gcp-auth/gcp-auth-webhook.yaml.tmpl.tmpl",
vmpath.GuestAddonsDir,
"gcp-auth-webhook.yaml",
"0640",
true),
"0640"),
}, false, "gcp-auth"),
"volumesnapshots": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml",
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml.tmpl",
vmpath.GuestAddonsDir,
"snapshot.storage.k8s.io_volumesnapshotclasses.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml",
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml.tmpl",
vmpath.GuestAddonsDir,
"snapshot.storage.k8s.io_volumesnapshotcontents.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml",
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml.tmpl",
vmpath.GuestAddonsDir,
"snapshot.storage.k8s.io_volumesnapshots.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/volumesnapshots/rbac-volume-snapshot-controller.yaml",
"deploy/addons/volumesnapshots/rbac-volume-snapshot-controller.yaml.tmpl",
vmpath.GuestAddonsDir,
"rbac-volume-snapshot-controller.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/volumesnapshots/volume-snapshot-controller-deployment.yaml.tmpl",
vmpath.GuestAddonsDir,
"volume-snapshot-controller-deployment.yaml",
"0640",
true),
"0640"),
}, false, "volumesnapshots"),
"csi-hostpath-driver": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml",
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml.tmpl",
vmpath.GuestAddonsDir,
"rbac-external-attacher.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml",
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml.tmpl",
vmpath.GuestAddonsDir,
"rbac-external-provisioner.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml",
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml.tmpl",
vmpath.GuestAddonsDir,
"rbac-external-resizer.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml",
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml.tmpl",
vmpath.GuestAddonsDir,
"rbac-external-snapshotter.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml",
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml.tmpl",
vmpath.GuestAddonsDir,
"csi-hostpath-attacher.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml",
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml.tmpl",
vmpath.GuestAddonsDir,
"csi-hostpath-driverinfo.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml",
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml.tmpl",
vmpath.GuestAddonsDir,
"csi-hostpath-plugin.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml",
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml.tmpl",
vmpath.GuestAddonsDir,
"csi-hostpath-provisioner.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml",
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml.tmpl",
vmpath.GuestAddonsDir,
"csi-hostpath-resizer.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml",
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml.tmpl",
vmpath.GuestAddonsDir,
"csi-hostpath-snapshotter.yaml",
"0640",
false),
"0640"),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml",
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml.tmpl",
vmpath.GuestAddonsDir,
"csi-hostpath-storageclass.yaml",
"0640",
false),
"0640"),
}, false, "csi-hostpath-driver"),
}

View File

@ -198,8 +198,8 @@ type BinAsset struct {
}
// MustBinAsset creates a new BinAsset, or panics if invalid
func MustBinAsset(name, targetDir, targetName, permissions string, isTemplate bool) *BinAsset {
asset, err := NewBinAsset(name, targetDir, targetName, permissions, isTemplate)
func MustBinAsset(name, targetDir, targetName, permissions string) *BinAsset {
asset, err := NewBinAsset(name, targetDir, targetName, permissions)
if err != nil {
panic(fmt.Sprintf("Failed to define asset %s: %v", name, err))
}
@ -207,7 +207,7 @@ func MustBinAsset(name, targetDir, targetName, permissions string, isTemplate bo
}
// NewBinAsset creates a new BinAsset
func NewBinAsset(name, targetDir, targetName, permissions string, isTemplate bool) (*BinAsset, error) {
func NewBinAsset(name, targetDir, targetName, permissions string) (*BinAsset, error) {
m := &BinAsset{
BaseAsset: BaseAsset{
SourcePath: name,
@ -217,7 +217,7 @@ func NewBinAsset(name, targetDir, targetName, permissions string, isTemplate boo
},
template: nil,
}
err := m.loadData(isTemplate)
err := m.loadData()
return m, err
}
@ -232,21 +232,19 @@ func defaultValue(defValue string, val interface{}) string {
return strVal
}
func (m *BinAsset) loadData(isTemplate bool) error {
func (m *BinAsset) loadData() error {
contents, err := Asset(m.SourcePath)
if err != nil {
return err
}
if isTemplate {
tpl, err := template.New(m.SourcePath).Funcs(template.FuncMap{"default": defaultValue}).Parse(string(contents))
if err != nil {
return err
}
m.template = tpl
tpl, err := template.New(m.SourcePath).Funcs(template.FuncMap{"default": defaultValue}).Parse(string(contents))
if err != nil {
return err
}
m.template = tpl
m.length = len(contents)
m.reader = bytes.NewReader(contents)
klog.V(1).Infof("Created asset %s with %d bytes", m.SourcePath, m.length)

View File

@ -19,11 +19,13 @@ package bsutil
import (
"fmt"
"io/ioutil"
"sort"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/pmezard/go-difflib/difflib"
"golang.org/x/mod/semver"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
@ -75,9 +77,28 @@ func getExtraOptsPodCidr() []config.ExtraOption {
}
}
func recentReleases() ([]string, error) {
// test the 6 most recent releases
versions := []string{"v1.20.0-beta.1", "v1.19", "v1.18", "v1.17", "v1.16", "v1.15", "v1.14", "v1.13", "v1.12"}
// recentReleases returns a dynamic list of up to n recent testdata versions, sorted from newest to older.
// If n > 0, returns at most n versions.
// If n <= 0, returns all the versions.
// It will error if no testdata are available or in absence of testdata for newest and default minor k8s versions.
func recentReleases(n int) ([]string, error) {
path := "testdata"
files, err := ioutil.ReadDir(path)
if err != nil {
return nil, fmt.Errorf("unable to list testdata directory %s: %w", path, err)
}
var versions []string
for _, file := range files {
if file.IsDir() {
versions = append(versions, file.Name())
}
}
sort.Slice(versions, func(i, j int) bool { return versions[i] > versions[j] })
if n <= 0 || n > len(versions) {
n = len(versions)
}
versions = versions[0:n]
foundNewest := false
foundDefault := false
@ -108,7 +129,17 @@ This test case has only 1 thing to test and that is the
networking/dnsDomain value
*/
func TestGenerateKubeadmYAMLDNS(t *testing.T) {
versions := []string{"v1.20.0-beta.1", "v1.19", "v1.18", "v1.17", "v1.16", "v1.15", "v1.14", "v1.13", "v1.12"}
// test all testdata releases greater than v1.11
versions, err := recentReleases(0)
if err != nil {
t.Errorf("versions: %v", err)
}
for i, v := range versions {
if semver.Compare(v, "v1.11") <= 0 {
versions = versions[0:i]
break
}
}
fcr := command.NewFakeCommandRunner()
fcr.SetCommandToOutput(map[string]string{
"docker info --format {{.CgroupDriver}}": "systemd\n",
@ -138,6 +169,11 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) {
},
}
cfg.KubernetesConfig.KubernetesVersion = version + ".0"
// if version+".0" does not yet have a stable release, use NewestKubernetesVersion
// ie, 'v1.20.0-beta.1' NewestKubernetesVersion indicates that 'v1.20.0' is not yet released as stable
if semver.Compare(cfg.KubernetesConfig.KubernetesVersion, constants.NewestKubernetesVersion) == 1 {
cfg.KubernetesConfig.KubernetesVersion = constants.NewestKubernetesVersion
}
cfg.KubernetesConfig.ClusterName = "kubernetes"
got, err := GenerateKubeadmYAML(cfg, cfg.Nodes[0], runtime)
@ -175,7 +211,8 @@ func TestGenerateKubeadmYAMLDNS(t *testing.T) {
func TestGenerateKubeadmYAML(t *testing.T) {
extraOpts := getExtraOpts()
extraOptsPodCidr := getExtraOptsPodCidr()
versions, err := recentReleases()
// test the 6 most recent releases
versions, err := recentReleases(6)
if err != nil {
t.Errorf("versions: %v", err)
}
@ -225,6 +262,11 @@ func TestGenerateKubeadmYAML(t *testing.T) {
}
}
cfg.KubernetesConfig.KubernetesVersion = version + ".0"
// if version+".0" does not yet have a stable release, use NewestKubernetesVersion
// ie, 'v1.20.0-beta.1' NewestKubernetesVersion indicates that 'v1.20.0' is not yet released as stable
if semver.Compare(cfg.KubernetesConfig.KubernetesVersion, constants.NewestKubernetesVersion) == 1 {
cfg.KubernetesConfig.KubernetesVersion = constants.NewestKubernetesVersion
}
cfg.KubernetesConfig.ClusterName = "kubernetes"
got, err := GenerateKubeadmYAML(cfg, cfg.Nodes[0], runtime)

View File

@ -0,0 +1,67 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 12345
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /run/containerd/containerd.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:12345
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249

View File

@ -0,0 +1,67 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /run/containerd/containerd.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1
networking:
dnsDomain: cluster.local
podSubnet: "192.168.32.0/20"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 1.1.1.1:10249

View File

@ -0,0 +1,67 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /run/containerd/containerd.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249

View File

@ -0,0 +1,74 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/crio/crio.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
fail-no-swap: "true"
feature-gates: "a=b"
controllerManager:
extraArgs:
feature-gates: "a=b"
kube-api-burst: "32"
leader-elect: "false"
scheduler:
extraArgs:
feature-gates: "a=b"
leader-elect: "false"
scheduler-name: "mini-scheduler"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -0,0 +1,67 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/crio/crio.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249

View File

@ -0,0 +1,67 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249

View File

@ -0,0 +1,67 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1
networking:
dnsDomain: 1.1.1.1
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249

View File

@ -0,0 +1,68 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
imageRepository: test/repo
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249

View File

@ -0,0 +1,71 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
fail-no-swap: "true"
controllerManager:
extraArgs:
kube-api-burst: "32"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
scheduler-name: "mini-scheduler"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.20.0-beta.1
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 1.1.1.1:10249
mode: "iptables"

View File

@ -194,9 +194,13 @@ func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACert
return nil, errors.Wrap(err, "getting service cluster ip")
}
apiServerIPs := append(
k8s.APIServerIPs,
[]net.IP{net.ParseIP(n.IP), serviceIP, net.ParseIP(oci.DefaultBindIPV4), net.ParseIP("10.0.0.1")}...)
apiServerIPs := append(k8s.APIServerIPs,
net.ParseIP(n.IP), serviceIP, net.ParseIP(oci.DefaultBindIPV4), net.ParseIP("10.0.0.1"))
if v := oci.DaemonHost(k8s.ContainerRuntime); v != oci.DefaultBindIPV4 {
apiServerIPs = append(apiServerIPs, net.ParseIP(v))
}
apiServerNames := append(k8s.APIServerNames, k8s.APIServerName, constants.ControlPlaneAlias)
apiServerAlternateNames := append(
apiServerNames,

View File

@ -35,9 +35,6 @@ var keywords = []string{"start", "stop", "status", "delete", "config", "open", "
// IsValid checks if the profile has the essential info needed for a profile
func (p *Profile) IsValid() bool {
if p.Config == nil {
return false
}
if p.Config == nil {
return false
}

View File

@ -79,6 +79,20 @@ const (
MinikubeForceSystemdEnv = "MINIKUBE_FORCE_SYSTEMD"
// TestDiskUsedEnv is used in integration tests for insufficient storage with 'minikube status'
TestDiskUsedEnv = "MINIKUBE_TEST_STORAGE_CAPACITY"
// scheduled stop constants
ScheduledStopEnvFile = "/var/lib/minikube/scheduled-stop/environment"
ScheduledStopSystemdService = "minikube-scheduled-stop"
// MinikubeExistingPrefix is used to save the original environment when executing docker-env
MinikubeExistingPrefix = "MINIKUBE_EXISTING_"
// ExistingDockerHostEnv is used to save original docker environment
ExistingDockerHostEnv = MinikubeExistingPrefix + "DOCKER_HOST"
// ExistingDockerCertPathEnv is used to save original docker environment
ExistingDockerCertPathEnv = MinikubeExistingPrefix + "DOCKER_CERT_PATH"
// ExistingDockerTLSVerifyEnv is used to save original docker environment
ExistingDockerTLSVerifyEnv = MinikubeExistingPrefix + "DOCKER_TLS_VERIFY"
)
var (
@ -94,6 +108,9 @@ var (
// DockerDaemonEnvs is list of docker-daemon related environment variables.
DockerDaemonEnvs = [3]string{DockerHostEnv, DockerTLSVerifyEnv, DockerCertPathEnv}
// ExistingDockerDaemonEnvs is list of docker-daemon related environment variables.
ExistingDockerDaemonEnvs = [3]string{ExistingDockerHostEnv, ExistingDockerTLSVerifyEnv, ExistingDockerCertPathEnv}
// PodmanRemoteEnvs is list of podman-remote related environment variables.
PodmanRemoteEnvs = [1]string{PodmanVarlinkBridgeEnv}

View File

@ -47,7 +47,13 @@ func (cpb *progressBar) TrackProgress(src string, currentSize, totalSize int64,
cpb.progress = pb.New64(totalSize)
}
p := pb.Full.Start64(totalSize)
p.Set("prefix", " > "+filepath.Base(src+": "))
fn := filepath.Base(src)
// abbreviate filename for progress
maxwidth := 30 - len("...")
if len(fn) > maxwidth {
fn = fn[0:maxwidth] + "..."
}
p.Set("prefix", " > "+fn+": ")
p.SetCurrent(currentSize)
p.Set(pb.Bytes, true)

View File

@ -151,8 +151,14 @@ func NeedsRoot(name string) bool {
// NeedsPortForward returns true if driver is unable provide direct IP connectivity
func NeedsPortForward(name string) bool {
if !IsKIC(name) {
return false
}
if oci.IsExternalDaemonHost(name) {
return true
}
// Docker for Desktop
return IsKIC(name) && (runtime.GOOS == "darwin" || runtime.GOOS == "windows" || IsMicrosoftWSL())
return runtime.GOOS == "darwin" || runtime.GOOS == "windows" || IsMicrosoftWSL()
}
// IsMicrosoftWSL will return true if process is running in WSL in windows

View File

@ -29,7 +29,8 @@ import (
func ControlPlaneEndpoint(cc *config.ClusterConfig, cp *config.Node, driverName string) (string, net.IP, int, error) {
if NeedsPortForward(driverName) {
port, err := oci.ForwardedPort(cc.Driver, cc.Name, cp.Port)
hostname := oci.DefaultBindIPV4
hostname := oci.DaemonHost(driverName)
ip := net.ParseIP(hostname)
if ip == nil {
return hostname, ip, port, fmt.Errorf("failed to parse ip for %q", hostname)

View File

@ -232,6 +232,7 @@ var (
DrvNotDetected = Kind{ID: "DRV_NOT_DETECTED", ExitCode: ExDriverNotFound}
DrvAsRoot = Kind{ID: "DRV_AS_ROOT", ExitCode: ExDriverPermission}
DrvNeedsRoot = Kind{ID: "DRV_NEEDS_ROOT", ExitCode: ExDriverPermission}
DrvNeedsAdministrator = Kind{ID: "DRV_NEEDS_ADMINISTRATOR", ExitCode: ExDriverPermission}
GuestCacheLoad = Kind{ID: "GUEST_CACHE_LOAD", ExitCode: ExGuestError}
GuestCert = Kind{ID: "GUEST_CERT", ExitCode: ExGuestError}

View File

@ -89,7 +89,7 @@ func status() registry.State {
ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, path, "@(Get-Wmiobject Win32_ComputerSystem).HypervisorPresent")
cmd := exec.CommandContext(ctx, path, "-NoProfile", "-NonInteractive","@(Get-Wmiobject Win32_ComputerSystem).HypervisorPresent")
out, err := cmd.CombinedOutput()
if err != nil {
@ -105,5 +105,32 @@ func status() registry.State {
return registry.State{Installed: false, Running: false, Error: errorMessage, Fix: fixMessage, Doc: docURL}
}
// Ensure user is either a Windows Administrator or a Hyper-V Administrator.
adminCheckCmd := exec.CommandContext(ctx, path, "-NoProfile", "-NonInteractive",`@([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole] "Administrator")`)
adminCheckOut, adminCheckErr := adminCheckCmd.CombinedOutput()
if adminCheckErr != nil {
errorMessage := fmt.Errorf("%s returned %q", strings.Join(adminCheckCmd.Args, " "), adminCheckOut)
fixMessage := "Unable to determine current user's administrator privileges"
return registry.State{Installed: true, Running: false, Error: errorMessage, Fix: fixMessage}
}
hypervAdminCheckCmd := exec.CommandContext(ctx, path, "-NoProfile", "-NonInteractive", `@([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole(([System.Security.Principal.SecurityIdentifier]::new("S-1-5-32-578")))`)
hypervAdminCheckOut, hypervAdminCheckErr := hypervAdminCheckCmd.CombinedOutput()
if hypervAdminCheckErr != nil {
errorMessage := fmt.Errorf("%s returned %q", strings.Join(hypervAdminCheckCmd.Args, " "), hypervAdminCheckOut)
fixMessage := "Unable to determine current user's Hyper-V administrator privileges."
return registry.State{Installed: true, Running: false, Error: errorMessage, Fix: fixMessage}
}
if (strings.TrimSpace(string(adminCheckOut)) != "True") && (strings.TrimSpace(string(hypervAdminCheckOut)) != "True") {
err := fmt.Errorf("Hyper-V requires Administrator privileges")
fixMessage := "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode."
return registry.State{Installed: true, Running: false, Error: err, Fix: fixMessage}
}
return registry.State{Installed: true, Healthy: true}
}
}

View File

@ -20,16 +20,93 @@ package schedule
import (
"fmt"
"os/exec"
"time"
"github.com/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/sysinit"
)
// KillExisting will kill existing scheduled stops
func KillExisting(profiles []string) {
klog.Errorf("not yet implemented for windows")
for _, profile := range profiles {
if err := killExisting(profile); err != nil {
klog.Errorf("error terminating scheduled stop for profile %s: %v", profile, err)
}
}
}
func daemonize(profiles []string, duration time.Duration) error {
return fmt.Errorf("not yet implemented for windows")
// killExisting cancels a pending scheduled stop for the given profile by
// stopping the minikube-scheduled-stop systemd unit inside its machine.
func killExisting(profile string) error {
	klog.Infof("trying to kill existing schedule stop for profile %s...", profile)
	client, err := machine.NewAPIClient()
	if err != nil {
		return errors.Wrapf(err, "getting api client for profile %s", profile)
	}
	host, err := client.Load(profile)
	if err != nil {
		return errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
	}
	cmdRunner, err := machine.CommandRunner(host)
	if err != nil {
		return errors.Wrap(err, "getting command runner")
	}
	// Stopping the systemd unit aborts the countdown the service was running.
	if err := sysinit.New(cmdRunner).Stop(constants.ScheduledStopSystemdService); err != nil {
		return errors.Wrapf(err, "stopping schedule-stop service for profile %s", profile)
	}
	return nil
}
// daemonize arms a scheduled stop for every listed profile. On Windows the
// stop is scheduled from within minikube itself: starting the
// minikube-scheduled-stop systemd service in each machine kicks off the
// countdown.
func daemonize(profiles []string, duration time.Duration) error {
	for _, p := range profiles {
		err := startSystemdService(p, duration)
		if err != nil {
			return errors.Wrapf(err, "implementing scheduled stop for %s", p)
		}
	}
	return nil
}
// startSystemdService arms the scheduled stop for a single profile.
//
// The minikube-scheduled-stop systemd service reads how long to sleep before
// shutting minikube down from an environment file. We first write the user's
// requested duration into that file (the SLEEP variable), then restart the
// service so it picks up the new value.
func startSystemdService(profile string, duration time.Duration) error {
	// get ssh runner
	klog.Infof("starting systemd service for profile %s...", profile)
	api, err := machine.NewAPIClient()
	if err != nil {
		return errors.Wrapf(err, "getting api client for profile %s", profile)
	}
	h, err := api.Load(profile)
	if err != nil {
		return errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
	}
	runner, err := machine.CommandRunner(h)
	if err != nil {
		return errors.Wrap(err, "getting command runner")
	}
	// make sure the directory holding the environment file exists in the machine
	if rr, err := runner.RunCmd(exec.Command("sudo", "mkdir", "-p", "/var/lib/minikube/scheduled-stop")); err != nil {
		return errors.Wrapf(err, "creating dirs: %v", rr.Output())
	}
	// update environment file to include duration
	if err := runner.Copy(environmentFile(duration)); err != nil {
		return errors.Wrap(err, "copying scheduled stop env file")
	}
	// restart scheduled stop service in container so it re-reads SLEEP
	sysManger := sysinit.New(runner)
	return sysManger.Restart(constants.ScheduledStopSystemdService)
}
// environmentFile renders the environment file consumed by the
// minikube-scheduled-stop systemd service. Its single entry is
// SLEEP=<requested stop delay in seconds>.
func environmentFile(duration time.Duration) assets.CopyableFile {
	data := fmt.Sprintf("SLEEP=%v", duration.Seconds())
	return assets.NewMemoryAssetTarget([]byte(data), constants.ScheduledStopEnvFile, "0644")
}

View File

@ -41,11 +41,19 @@ func Daemonize(profiles []string, duration time.Duration) error {
continue
}
daemonizeProfiles = append(daemonizeProfiles, p)
}
if err := daemonize(daemonizeProfiles, duration); err != nil {
return errors.Wrap(err, "daemonizing")
}
// save scheduled stop config if daemonize was successful
for _, d := range daemonizeProfiles {
_, cc := mustload.Partial(d)
cc.ScheduledStop = scheduledStop
if err := config.SaveProfile(p, cc); err != nil {
if err := config.SaveProfile(d, cc); err != nil {
return errors.Wrap(err, "saving profile")
}
}
return daemonize(daemonizeProfiles, duration)
return nil
}

View File

@ -24,12 +24,21 @@ import (
"io"
"os"
"runtime"
"strings"
"text/template"
"github.com/docker/machine/libmachine/shell"
"k8s.io/minikube/pkg/minikube/constants"
)
var unsetEnvTmpl = "{{ $root := .}}" +
"{{ range .Unset }}" +
"{{ $root.UnsetPrefix }}{{ . }}{{ $root.UnsetDelimiter }}{{ $root.UnsetSuffix }}" +
"{{ end }}" +
"{{ range .Set }}" +
"{{ $root.SetPrefix }}{{ .Env }}{{ $root.SetDelimiter }}{{ .Value }}{{ $root.SetSuffix }}" +
"{{ end }}"
// Config represents the shell config
type Config struct {
Prefix string
@ -107,7 +116,7 @@ REM @FOR /f "tokens=*" %%i IN ('%s') DO @%%i
suffix: "\"\n",
delimiter: "=\"",
unsetPrefix: "unset ",
unsetSuffix: "\n",
unsetSuffix: ";\n",
unsetDelimiter: "",
usageHint: func(s ...interface{}) string {
return fmt.Sprintf(`
@ -181,24 +190,46 @@ func SetScript(ec EnvConfig, w io.Writer, envTmpl string, data interface{}) erro
return tmpl.Execute(w, data)
}
type unsetConfigItem struct {
Env, Value string
}
type unsetConfig struct {
Set []unsetConfigItem
Unset []string
SetPrefix string
SetDelimiter string
SetSuffix string
UnsetPrefix string
UnsetDelimiter string
UnsetSuffix string
}
// UnsetScript writes out a shell-compatible unset script
func UnsetScript(ec EnvConfig, w io.Writer, vars []string) error {
var sb strings.Builder
shellCfg := ec.getShell()
pfx, sfx, delim := shellCfg.unsetPrefix, shellCfg.unsetSuffix, shellCfg.unsetDelimiter
switch ec.Shell {
case "cmd", "emacs", "fish":
break
case "powershell":
vars = []string{strings.Join(vars, " Env:\\\\")}
default:
vars = []string{strings.Join(vars, " ")}
cfg := unsetConfig{
SetPrefix: shellCfg.prefix,
SetDelimiter: shellCfg.delimiter,
SetSuffix: shellCfg.suffix,
UnsetPrefix: shellCfg.unsetPrefix,
UnsetDelimiter: shellCfg.unsetDelimiter,
UnsetSuffix: shellCfg.unsetSuffix,
}
for _, v := range vars {
if _, err := sb.WriteString(fmt.Sprintf("%s%s%s%s", pfx, v, delim, sfx)); err != nil {
return err
var tempUnset []string
for _, env := range vars {
exEnv := constants.MinikubeExistingPrefix + env
if v := os.Getenv(exEnv); v == "" {
cfg.Unset = append(cfg.Unset, env)
} else {
cfg.Set = append(cfg.Set, unsetConfigItem{
Env: env,
Value: v,
})
tempUnset = append(tempUnset, exEnv)
}
}
_, err := w.Write([]byte(sb.String()))
return err
cfg.Unset = append(cfg.Unset, tempUnset...)
tmpl := template.Must(template.New("unsetEnv").Parse(unsetEnvTmpl))
return tmpl.Execute(w, &cfg)
}

View File

@ -87,16 +87,19 @@ func TestUnsetScript(t *testing.T) {
ec EnvConfig
expected string
}{
{[]string{"baz", "bar"}, EnvConfig{""}, `unset baz bar`},
{[]string{"baz", "bar"}, EnvConfig{"bash"}, `unset baz bar`},
{[]string{"baz", "bar"}, EnvConfig{"powershell"}, `Remove-Item Env:\\baz Env:\\bar`},
{[]string{"baz", "bar"}, EnvConfig{""}, `unset baz;
unset bar;`},
{[]string{"baz", "bar"}, EnvConfig{"bash"}, `unset baz;
unset bar;`},
{[]string{"baz", "bar"}, EnvConfig{"powershell"}, `Remove-Item Env:\\baz
Remove-Item Env:\\bar`},
{[]string{"baz", "bar"}, EnvConfig{"cmd"}, `SET baz=
SET bar=`},
{[]string{"baz", "bar"}, EnvConfig{"fish"}, `set -e baz;
set -e bar;`},
{[]string{"baz", "bar"}, EnvConfig{"emacs"}, `(setenv "baz" nil)
(setenv "bar" nil)`},
{[]string{"baz", "bar"}, EnvConfig{"none"}, `baz bar`},
{[]string{"baz", "bar"}, EnvConfig{"none"}, "baz\nbar"},
}
for _, tc := range testCases {
tc := tc

View File

@ -20,6 +20,7 @@ package sysinit
import (
"bytes"
"context"
"fmt"
"html/template"
"os/exec"
"path"
@ -128,6 +129,12 @@ func (s *OpenRC) Restart(svc string) error {
return err
}
// Reload reloads a service's configuration.
// It is currently only exercised by docker-env, which does not need an
// OpenRC implementation, so this is an intentional stub that always errors.
func (s *OpenRC) Reload(svc string) error {
	// Go convention: error strings are lowercase with no trailing punctuation.
	return fmt.Errorf("reload of service %q is not implemented for OpenRC; please implement if needed", svc)
}
// Stop stops a service
func (s *OpenRC) Stop(svc string) error {
rr, err := s.r.RunCmd(exec.Command("sudo", "service", svc, "stop"))

View File

@ -50,6 +50,9 @@ type Manager interface {
// Restart restarts a service
Restart(string) error
// Reload restarts a service
Reload(string) error
// Stop stops a service
Stop(string) error

View File

@ -34,8 +34,8 @@ func (s *Systemd) Name() string {
return "systemd"
}
// reload reloads systemd configuration
func (s *Systemd) reload() error {
// daemonReload reloads systemd configuration
func (s *Systemd) daemonReload() error {
_, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "daemon-reload"))
return err
}
@ -63,7 +63,7 @@ func (s *Systemd) Enable(svc string) error {
// Start starts a service
func (s *Systemd) Start(svc string) error {
if err := s.reload(); err != nil {
if err := s.daemonReload(); err != nil {
return err
}
_, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "start", svc))
@ -72,13 +72,22 @@ func (s *Systemd) Start(svc string) error {
// Restart restarts a service
func (s *Systemd) Restart(svc string) error {
if err := s.reload(); err != nil {
if err := s.daemonReload(); err != nil {
return err
}
_, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "restart", svc))
return err
}
// Reload reloads a running service's configuration without restarting it,
// after refreshing systemd's unit definitions via daemon-reload.
func (s *Systemd) Reload(svc string) error {
	err := s.daemonReload()
	if err == nil {
		_, err = s.r.RunCmd(exec.Command("sudo", "systemctl", "reload", svc))
	}
	return err
}
// Stop stops a service
func (s *Systemd) Stop(svc string) error {
_, err := s.r.RunCmd(exec.Command("sudo", "systemctl", "stop", svc))

View File

@ -36,6 +36,7 @@ import (
"github.com/docker/machine/libmachine/swarm"
"github.com/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
@ -96,12 +97,17 @@ func configureAuth(p miniProvisioner) error {
return errors.Wrap(err, "error getting ip during provisioning")
}
hostIP, err := driver.GetSSHHostname()
if err != nil {
return errors.Wrap(err, "error getting ssh hostname during provisioning")
}
if err := copyHostCerts(authOptions); err != nil {
return err
}
// The Host IP is always added to the certificate's SANs list
hosts := append(authOptions.ServerCertSANs, ip, "localhost", "127.0.0.1", "minikube", machineName)
hosts := append(authOptions.ServerCertSANs, ip, hostIP, "localhost", "127.0.0.1", "minikube", machineName)
klog.Infof("generating server cert: %s ca-key=%s private-key=%s org=%s san=%s",
authOptions.ServerCertPath,
authOptions.CaCertPath,

Some files were not shown because too many files have changed in this diff Show More