Merge remote-tracking branch 'origin/master' into fix-hostname
commit 63aef4ab54
CHANGELOG.md (+36 −0)
@@ -1,5 +1,41 @@
 # Release Notes

+## Version 1.9.0-beta.2 - 2020-03-21
+
+New features & improvements
+
+* 🎉 Experimental multi-node support 🎊 [#6787](https://github.com/kubernetes/minikube/pull/6787)
+* Add kubectl desc nodes to minikube logs [#7105](https://github.com/kubernetes/minikube/pull/7105)
+* bumpup helm-tiller v2.16.1 → v2.16.3 [#7130](https://github.com/kubernetes/minikube/pull/7130)
+* Update Nvidia GPU plugin [#7132](https://github.com/kubernetes/minikube/pull/7132)
+* bumpup istio & istio-provisoner addon 1.4.0 → 1.5.0 [#7120](https://github.com/kubernetes/minikube/pull/7120)
+* New addon: registry-aliases [#6657](https://github.com/kubernetes/minikube/pull/6657)
+* Upgrade buildroot minor version [#7101](https://github.com/kubernetes/minikube/pull/7101)
+* Skip kubeadm if cluster is running & properly configured [#7124](https://github.com/kubernetes/minikube/pull/7124)
+* Make certificates per-profile and consistent until IP or names change [#7125](https://github.com/kubernetes/minikube/pull/7125)
+
+Bugfixes
+
+* Prevent minikube from crashing if namespace or service doesn't exist [#5844](https://github.com/kubernetes/minikube/pull/5844)
+* Add warning if both vm-driver and driver are specified [#7109](https://github.com/kubernetes/minikube/pull/7109)
+* Improve error when docker-env is used with non-docker runtime [#7112](https://github.com/kubernetes/minikube/pull/7112)
+* provisioner: only reload docker if necessary, don't install curl [#7115](https://github.com/kubernetes/minikube/pull/7115)
+
+Thank you to our contributors:
+
+- Anders F Björklund
+- Iso Kenta
+- Kamesh Sampath
+- Kenta Iso
+- Prasad Katti
+- Priya Wadhwa
+- Sharif Elgamal
+- Tacio Costa
+- Thomas Strömberg
+- Zhongcheng Lao
+- rajula96reddy
+- sayboras
+
 ## Version 1.9.0-beta.1 - 2020-03-18

 New features
Makefile (+1 −1)
@@ -15,7 +15,7 @@
 # Bump these on release - and please check ISO_VERSION for correctness.
 VERSION_MAJOR ?= 1
 VERSION_MINOR ?= 9
-VERSION_BUILD ?= 0-beta.1
+VERSION_BUILD ?= 0-beta.2
 RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
 VERSION ?= v$(RAW_VERSION)
@@ -95,18 +95,18 @@ var logsCmd = &cobra.Command{
   exit.WithError("Unable to get runtime", err)
  }
  if followLogs {
-  err := logs.Follow(cr, bs, runner)
+  err := logs.Follow(cr, bs, *cfg, runner)
   if err != nil {
    exit.WithError("Follow", err)
   }
   return
  }
  if showProblems {
-  problems := logs.FindProblems(cr, bs, runner)
+  problems := logs.FindProblems(cr, bs, *cfg, runner)
   logs.OutputProblems(problems, numberOfProblems)
   return
  }
- err = logs.Output(cr, bs, runner, numberOfLines)
+ err = logs.Output(cr, bs, *cfg, runner, numberOfLines)
  if err != nil {
   exit.WithError("Error getting machine logs", err)
  }
@@ -850,7 +850,7 @@ func validateRegistryMirror() {
  }
 }

-// generateCfgFromFlags generates config.Config based on flags and supplied arguments
+// generateCfgFromFlags generates config.ClusterConfig based on flags and supplied arguments
 func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) (config.ClusterConfig, config.Node, error) {
  r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)})
  if err != nil {
go.sum (+3 −0)
@@ -61,7 +61,9 @@ github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmx
 github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c h1:18gEt7qzn7CW7qMkfPTFyyotlPbvPQo9o4IDV8jZqP4=
 github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY=
 github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
@@ -972,6 +974,7 @@ google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
 google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -286,7 +286,6 @@ fi
 touch "${TEST_OUT}"
 ${SUDO_PREFIX}${E2E_BIN} \
   -minikube-start-args="--driver=${VM_DRIVER} ${EXTRA_START_ARGS}" \
-  -expected-default-driver="${EXPECTED_DEFAULT_DRIVER}" \
   -test.timeout=70m -test.v \
   ${EXTRA_TEST_ARGS} \
   -binary="${MINIKUBE_BIN}" 2>&1 | tee "${TEST_OUT}"
@@ -19,7 +19,7 @@ gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata .
 ./out/minikube-windows-amd64.exe delete

-out/e2e-windows-amd64.exe --expected-default-driver=hyperv -minikube-start-args="--driver=hyperv --hyperv-virtual-switch=primary-virtual-switch" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m
+out/e2e-windows-amd64.exe -minikube-start-args="--driver=hyperv --hyperv-virtual-switch=primary-virtual-switch" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=65m
 $env:result=$lastexitcode
 # If the last exit code was 0->success, x>0->error
 If($env:result -eq 0){$env:status="success"}
@@ -19,7 +19,7 @@ gsutil.cmd -m cp -r gs://minikube-builds/$env:MINIKUBE_LOCATION/testdata .
 ./out/minikube-windows-amd64.exe delete

-out/e2e-windows-amd64.exe -minikube-start-args="--driver=virtualbox" -expected-default-driver=hyperv -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=30m
+out/e2e-windows-amd64.exe -minikube-start-args="--driver=virtualbox" -binary=out/minikube-windows-amd64.exe -test.v -test.timeout=30m
 $env:result=$lastexitcode
 # If the last exit code was 0->success, x>0->error
 If($env:result -eq 0){$env:status="success"}
@@ -44,7 +44,7 @@ type Bootstrapper interface {
  UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error
  GenerateToken(config.ClusterConfig) (string, error)
  // LogCommands returns a map of log type to a command which will display that log.
- LogCommands(LogOptions) map[string]string
+ LogCommands(config.ClusterConfig, LogOptions) map[string]string
  SetupCerts(config.KubernetesConfig, config.Node) error
  GetKubeletStatus() (string, error)
  GetAPIServerStatus(net.IP, int) (string, error)
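Editor's note: every Bootstrapper implementation and test double has to pick up the new parameter. A minimal sketch of a conforming method, using a hypothetical fake that is not part of this diff:

    // Hypothetical fake, for illustration only: the new LogCommands shape.
    type fakeBootstrapper struct{}

    func (f *fakeBootstrapper) LogCommands(cfg config.ClusterConfig, o bootstrapper.LogOptions) map[string]string {
        // cfg is now in scope, e.g. for version-specific log commands.
        return map[string]string{"kubelet": "sudo journalctl -u kubelet"}
    }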
@@ -94,16 +94,14 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana
  EtcdDataDir:      EtcdDataDir(),
  ClusterName:      cc.Name,
- //kubeadm uses NodeName as the --hostname-override parameter, so this needs to be the name of the machine
- NodeName:         driver.KubeNodeName(cc, n),
- CRISocket:        r.SocketPath(),
- ImageRepository:  k8s.ImageRepository,
- ComponentOptions: componentOpts,
- FeatureArgs:      kubeadmFeatureArgs,
- NoTaintMaster:    false, // That does not work with k8s 1.12+
- DNSDomain:        k8s.DNSDomain,
- NodeIP:           n.IP,
- // NOTE: If set to an specific VM IP, things may break if the IP changes on host restart
- // For multi-node, we may need to figure out an alternate strategy, like DNS or hosts files
+ NodeName:            driver.KubeNodeName(cc, n),
+ CRISocket:           r.SocketPath(),
+ ImageRepository:     k8s.ImageRepository,
+ ComponentOptions:    componentOpts,
+ FeatureArgs:         kubeadmFeatureArgs,
+ NoTaintMaster:       false, // That does not work with k8s 1.12+
+ DNSDomain:           k8s.DNSDomain,
+ NodeIP:              n.IP,
+ ControlPlaneAddress: cp.IP,
 }
@@ -37,6 +37,7 @@ import (
  kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
  "k8s.io/minikube/pkg/minikube/bootstrapper"
  "k8s.io/minikube/pkg/minikube/command"
+ "k8s.io/minikube/pkg/minikube/config"
  "k8s.io/minikube/pkg/minikube/cruntime"
  "k8s.io/minikube/pkg/minikube/logs"
 )
@@ -45,7 +46,7 @@
 const minLogCheckTime = 30 * time.Second

 // WaitForAPIServerProcess waits for api server to be healthy returns error if it doesn't
-func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, start time.Time, timeout time.Duration) error {
+func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, timeout time.Duration) error {
  glog.Infof("waiting for apiserver process to appear ...")
  err := wait.PollImmediate(time.Millisecond*500, timeout, func() (bool, error) {
   if time.Since(start) > timeout {
@@ -53,7 +54,7 @@ func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
   }

   if time.Since(start) > minLogCheckTime {
-   announceProblems(r, bs, cr)
+   announceProblems(r, bs, cfg, cr)
    time.Sleep(kconst.APICallRetryInterval * 5)
   }
@@ -142,7 +143,7 @@ func podStatusMsg(pod core.Pod) string {
 }

 // WaitForSystemPods verifies essential pods for running kurnetes is running
-func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error {
+func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error {
  glog.Info("waiting for kube-system pods to appear ...")
  pStart := time.Now()
@@ -151,7 +152,7 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr comm
    return false, fmt.Errorf("cluster wait timed out during pod check")
   }
   if time.Since(start) > minLogCheckTime {
-   announceProblems(r, bs, cr)
+   announceProblems(r, bs, cfg, cr)
    time.Sleep(kconst.APICallRetryInterval * 5)
   }
@@ -179,7 +180,7 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr comm
 }

 // WaitForHealthyAPIServer waits for api server status to be running
-func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner, start time.Time, ip string, port int, timeout time.Duration) error {
+func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, ip string, port int, timeout time.Duration) error {
  glog.Infof("waiting for apiserver healthz status ...")
  hStart := time.Now()
@@ -189,7 +190,7 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
   }

   if time.Since(start) > minLogCheckTime {
-   announceProblems(r, bs, cr)
+   announceProblems(r, bs, cfg, cr)
    time.Sleep(kconst.APICallRetryInterval * 5)
   }
@@ -212,8 +213,8 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
 }

 // announceProblems checks for problems, and slows polling down if any are found
-func announceProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr command.Runner) {
- problems := logs.FindProblems(r, bs, cr)
+func announceProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner) {
+ problems := logs.FindProblems(r, bs, cfg, cr)
  if len(problems) > 0 {
   logs.OutputProblems(problems, 5)
   time.Sleep(kconst.APICallRetryInterval * 15)
@@ -110,7 +110,7 @@ func (k *Bootstrapper) GetAPIServerStatus(ip net.IP, port int) (string, error) {
 }

 // LogCommands returns a map of log type to a command which will display that log.
-func (k *Bootstrapper) LogCommands(o bootstrapper.LogOptions) map[string]string {
+func (k *Bootstrapper) LogCommands(cfg config.ClusterConfig, o bootstrapper.LogOptions) map[string]string {
  var kubelet strings.Builder
  kubelet.WriteString("sudo journalctl -u kubelet")
  if o.Lines > 0 {
@@ -128,9 +128,15 @@ func (k *Bootstrapper) LogCommands(o bootstrapper.LogOptions) map[string]string
  if o.Lines > 0 {
   dmesg.WriteString(fmt.Sprintf(" | tail -n %d", o.Lines))
  }

+ describeNodes := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s",
+  path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"),
+  path.Join(vmpath.GuestPersistentDir, "kubeconfig"))
+
  return map[string]string{
-  "kubelet": kubelet.String(),
-  "dmesg":   dmesg.String(),
+  "kubelet":        kubelet.String(),
+  "dmesg":          dmesg.String(),
+  "describe nodes": describeNodes,
  }
 }
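Editor's note: with the cluster config in scope, LogCommands can point at the guest's versioned kubectl. Assuming vmpath.GuestPersistentDir resolves to /var/lib/minikube and Kubernetes v1.18.0 (illustrative values, not confirmed by this diff), the assembled command would look roughly like this sketch:

    // Illustration only: how the "describe nodes" command string is built.
    kubectl := path.Join("/var/lib/minikube", "binaries", "v1.18.0", "kubectl")
    kubeconfig := path.Join("/var/lib/minikube", "kubeconfig")
    cmd := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s", kubectl, kubeconfig)
    // => sudo /var/lib/minikube/binaries/v1.18.0/kubectl describe node -A --kubeconfig=/var/lib/minikube/kubeconfig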
@@ -151,6 +157,30 @@ func (k *Bootstrapper) createCompatSymlinks() error {
  return nil
 }

+// clearStaleConfigs clears configurations which may have stale IP addresses
+func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error {
+ cp, err := config.PrimaryControlPlane(&cfg)
+ if err != nil {
+  return err
+ }
+
+ paths := []string{
+  "/etc/kubernetes/admin.conf",
+  "/etc/kubernetes/kubelet.conf",
+  "/etc/kubernetes/controller-manager.conf",
+  "/etc/kubernetes/scheduler.conf",
+ }
+
+ endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(cp.IP, strconv.Itoa(cp.Port)))
+ for _, path := range paths {
+  _, err := k.c.RunCmd(exec.Command("sudo", "/bin/bash", "-c", fmt.Sprintf("grep %s %s || sudo rm -f %s", endpoint, path, path)))
+  if err != nil {
+   return err
+  }
+ }
+ return nil
+}
+
 // StartCluster starts the cluster
 func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
  err := bsutil.ExistingConfig(k.c)
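Editor's note: the grep-or-remove idiom keeps each kubeconfig only when it already references the current control-plane endpoint; anything stale is deleted so kubeadm regenerates it. A sketch of the rendered shell command, using a hypothetical endpoint:

    // Hypothetical values for illustration.
    endpoint := fmt.Sprintf("https://%s", net.JoinHostPort("172.17.0.2", strconv.Itoa(8443)))
    p := "/etc/kubernetes/admin.conf"
    // Keep the file if it mentions the live endpoint, otherwise remove it.
    cmd := fmt.Sprintf("grep %s %s || sudo rm -f %s", endpoint, p, p)
    // => grep https://172.17.0.2:8443 /etc/kubernetes/admin.conf || sudo rm -f /etc/kubernetes/admin.conf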
@@ -165,13 +195,6 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
  glog.Infof("StartCluster complete in %s", time.Since(start))
 }()

-// Remove admin.conf from any previous run
-c := exec.Command("/bin/bash", "-c", "sudo rm -f /etc/kubernetes/admin.conf")
-_, err = k.c.RunCmd(c)
-if err != nil {
- return errors.Wrap(err, "deleting admin.conf")
-}
-
 version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
 if err != nil {
  return errors.Wrap(err, "parsing kubernetes version")
@@ -209,8 +232,12 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
 }

+if err := k.clearStaleConfigs(cfg); err != nil {
+ return errors.Wrap(err, "clearing stale configs")
+}
+
 conf := bsutil.KubeadmYamlPath
-c = exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && %s init --config %s %s --ignore-preflight-errors=%s", conf, conf, bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ",")))
+c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && %s init --config %s %s --ignore-preflight-errors=%s", conf, conf, bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ",")))
 rr, err := k.c.RunCmd(c)
 if err != nil {
  return errors.Wrapf(err, "init failed. output: %q", rr.Output())
@@ -288,7 +315,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
  return err
 }

-if err := kverify.WaitForAPIServerProcess(cr, k, k.c, start, timeout); err != nil {
+if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, start, timeout); err != nil {
  return err
 }
@@ -297,7 +324,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
  return err
 }

-if err := kverify.WaitForHealthyAPIServer(cr, k, k.c, start, ip, port, timeout); err != nil {
+if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, start, ip, port, timeout); err != nil {
  return err
 }
@@ -306,7 +333,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
  return errors.Wrap(err, "get k8s client")
 }

-if err := kverify.WaitForSystemPods(cr, k, k.c, c, start, timeout); err != nil {
+if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, c, start, timeout); err != nil {
  return errors.Wrap(err, "waiting for system pods")
 }
 return nil
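Editor's note: threading config.ClusterConfig through the kverify waiters is what lets announceProblems reach the config-aware LogCommands (including the new kubectl describe output) when startup stalls. Roughly, the shared failure path now looks like this sketch, using the signatures from this diff:

    // Sketch of the slow-path problem reporting the waiters share.
    problems := logs.FindProblems(r, bs, cfg, cr)
    if len(problems) > 0 {
        logs.OutputProblems(problems, 5) // show up to 5 lines per source
    }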
@@ -314,8 +341,8 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time

 // needsReset returns whether or not the cluster needs to be reconfigured
 func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kubernetes.Clientset) bool {
- if _, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil {
-  glog.Infof("needs reset: configs differ")
+ if rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil {
+  glog.Infof("needs reset: configs differ:\n%s", rr.Output())
   return true
  }
@@ -379,6 +406,10 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
  return nil
 }

+if err := k.clearStaleConfigs(cfg); err != nil {
+ return errors.Wrap(err, "clearing stale configs")
+}
+
 if _, err := k.c.RunCmd(exec.Command("sudo", "mv", conf+".new", conf)); err != nil {
  return errors.Wrap(err, "mv")
 }
@@ -406,11 +437,11 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
 }

 // We must ensure that the apiserver is healthy before proceeding
-if err := kverify.WaitForAPIServerProcess(cr, k, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
+if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
  return errors.Wrap(err, "apiserver healthz")
 }

-if err := kverify.WaitForSystemPods(cr, k, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
+if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
  return errors.Wrap(err, "system pods")
 }
@@ -31,6 +31,7 @@ import (
 "github.com/pkg/errors"
 "k8s.io/minikube/pkg/minikube/bootstrapper"
 "k8s.io/minikube/pkg/minikube/command"
+"k8s.io/minikube/pkg/minikube/config"
 "k8s.io/minikube/pkg/minikube/cruntime"
 "k8s.io/minikube/pkg/minikube/out"
 )
@@ -87,9 +88,9 @@ type logRunner interface {
 const lookBackwardsCount = 400

 // Follow follows logs from multiple files in tail(1) format
-func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr logRunner) error {
+func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner) error {
 cs := []string{}
- for _, v := range logCommands(r, bs, 0, true) {
+ for _, v := range logCommands(r, bs, cfg, 0, true) {
  cs = append(cs, v+" &")
 }
 cs = append(cs, "wait")
@@ -109,9 +110,9 @@ func IsProblem(line string) bool {
 }

 // FindProblems finds possible root causes among the logs
-func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cr logRunner) map[string][]string {
+func FindProblems(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr logRunner) map[string][]string {
 pMap := map[string][]string{}
- cmds := logCommands(r, bs, lookBackwardsCount, false)
+ cmds := logCommands(r, bs, cfg, lookBackwardsCount, false)
 for name := range cmds {
  glog.Infof("Gathering logs for %s ...", name)
  var b bytes.Buffer
@@ -153,8 +154,8 @@ func OutputProblems(problems map[string][]string, maxLines int) {
 }

 // Output displays logs from multiple sources in tail(1) format
-func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Runner, lines int) error {
- cmds := logCommands(r, bs, lines, false)
+func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, runner command.Runner, lines int) error {
+ cmds := logCommands(r, bs, cfg, lines, false)
 cmds["kernel"] = "uptime && uname -a && grep PRETTY /etc/os-release"

 names := []string{}
@@ -191,8 +192,8 @@ func Output(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner command.Run
 }

 // logCommands returns a list of commands that would be run to receive the anticipated logs
-func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, length int, follow bool) map[string]string {
- cmds := bs.LogCommands(bootstrapper.LogOptions{Lines: length, Follow: follow})
+func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, length int, follow bool) map[string]string {
+ cmds := bs.LogCommands(cfg, bootstrapper.LogOptions{Lines: length, Follow: follow})
 for _, pod := range importantPods {
  ids, err := r.ListContainers(cruntime.ListOptions{Name: pod})
  if err != nil {
@@ -211,5 +212,6 @@ func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, length int, f
 }
 cmds[r.Name()] = r.SystemLogCmd(length)
 cmds["container status"] = cruntime.ContainerStatusCommand()
+
 return cmds
 }
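Editor's note: the config parameter is threaded top-down: the logs command passes *cfg into Follow/FindProblems/Output, which forward it through logCommands into bs.LogCommands. A sketch of the new entry-point call shape, with placeholder values:

    // Sketch: gathering the per-source log commands after this change.
    cmds := bs.LogCommands(cfg, bootstrapper.LogOptions{Lines: 60, Follow: false})
    for name, c := range cmds {
        glog.Infof("log source %q runs: %s", name, c)
    }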
@@ -36,11 +36,11 @@ import (
 "github.com/docker/machine/libmachine/host"
 "github.com/docker/machine/libmachine/mcnutils"
 "github.com/docker/machine/libmachine/persist"
-lib_provision "github.com/docker/machine/libmachine/provision"
 "github.com/docker/machine/libmachine/ssh"
 "github.com/docker/machine/libmachine/state"
 "github.com/docker/machine/libmachine/swarm"
 "github.com/docker/machine/libmachine/version"
 "github.com/golang/glog"
 "github.com/pkg/errors"
 "k8s.io/minikube/pkg/minikube/command"
 "k8s.io/minikube/pkg/minikube/driver"
@@ -49,7 +49,6 @@ import (
 "k8s.io/minikube/pkg/minikube/out"
 "k8s.io/minikube/pkg/minikube/registry"
 "k8s.io/minikube/pkg/minikube/sshutil"
-"k8s.io/minikube/pkg/provision"
 )

 // NewRPCClient gets a new client.
@@ -167,6 +166,12 @@ func CommandRunner(h *host.Host) (command.Runner, error) {

 // Create creates the host
 func (api *LocalClient) Create(h *host.Host) error {
+ glog.Infof("LocalClient.Create starting")
+ start := time.Now()
+ defer func() {
+  glog.Infof("LocalClient.Create took %s", time.Since(start))
+ }()
+
 def := registry.Driver(h.DriverName)
 if def.Empty() {
  return fmt.Errorf("driver %q does not exist", h.DriverName)
@@ -210,21 +215,17 @@ func (api *LocalClient) Create(h *host.Host) error {
 {
  "provisioning",
  func() error {
   // Skippable because we don't reconfigure Docker?
   if driver.BareMetal(h.Driver.DriverName()) {
    return nil
   }
-   var pv lib_provision.Provisioner
-   if driver.IsKIC(h.Driver.DriverName()) {
-    pv = provision.NewUbuntuProvisioner(h.Driver)
-   } else {
-    pv = provision.NewBuildrootProvisioner(h.Driver)
-   }
-   return pv.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions)
+   return provisionDockerMachine(h)
  },
 },
 }

 for _, step := range steps {

 if err := step.f(); err != nil {
  return errors.Wrap(err, step.name)
 }
@@ -140,9 +140,6 @@ func TestStartHostExists(t *testing.T) {
 if s, _ := h.Driver.GetState(); s != state.Running {
  t.Fatalf("Machine not started.")
 }
-if !md.Provisioner.Provisioned {
- t.Fatalf("Expected provision to be called")
-}
 }

 func TestStartHostErrMachineNotExist(t *testing.T) {
@@ -188,9 +185,6 @@ func TestStartHostErrMachineNotExist(t *testing.T) {
 if s, _ := h.Driver.GetState(); s != state.Running {
  t.Fatalf("Machine not started.")
 }
-if !md.Provisioner.Provisioned {
- t.Fatalf("Expected provision to be called")
-}
 }

 func TestStartStoppedHost(t *testing.T) {
@@ -226,9 +220,6 @@ func TestStartStoppedHost(t *testing.T) {
  t.Fatalf("Machine must be saved after starting.")
 }

-if !md.Provisioner.Provisioned {
- t.Fatalf("Expected provision to be called")
-}
 }

 func TestStartHost(t *testing.T) {
@@ -27,7 +27,6 @@ import (
 "github.com/docker/machine/drivers/virtualbox"
 "github.com/docker/machine/libmachine"
 "github.com/docker/machine/libmachine/host"
-"github.com/docker/machine/libmachine/provision"
 "github.com/docker/machine/libmachine/state"
 "github.com/golang/glog"
 "github.com/pkg/errors"
@@ -35,7 +34,6 @@ import (
 "k8s.io/minikube/pkg/minikube/constants"
 "k8s.io/minikube/pkg/minikube/driver"
 "k8s.io/minikube/pkg/minikube/out"
-"k8s.io/minikube/pkg/util/retry"
 )

 // hostRunner is a minimal host.Host based interface for running commands
@@ -77,17 +75,13 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
  return h, err
 }

-// Technically, we should only have to call provision if Docker has changed,
-// but who can predict what shape the existing VM is in.
 e := engineOptions(cc)
 if len(e.Env) > 0 {
- h.HostOptions.EngineOptions.Env = e.Env
- glog.Infof("Detecting provisioner ...")
- provisioner, err := provision.DetectProvisioner(h.Driver)
- if err != nil {
-  return h, errors.Wrap(err, "detecting provisioner")
- }
- if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil {
-  return h, errors.Wrap(err, "provision")
- }
+ h.HostOptions.EngineOptions.Env = e.Env
+ err = provisionDockerMachine(h)
+ if err != nil {
+  return h, errors.Wrap(err, "provision")
+ }
 }

 if driver.IsMock(h.DriverName) {
@@ -103,10 +97,6 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
  return h, nil
 }

-glog.Infof("Configuring auth for driver %s ...", h.Driver.DriverName())
-if err := h.ConfigureAuth(); err != nil {
- return h, &retry.RetriableError{Err: errors.Wrap(err, "Error configuring auth on host")}
-}
 return h, ensureSyncedGuestClock(h, cc.Driver)
 }
@@ -17,8 +17,14 @@ limitations under the License.
 package machine

 import (
+ "time"
+
 "github.com/docker/machine/libmachine/host"
+ libprovision "github.com/docker/machine/libmachine/provision"
+ "github.com/golang/glog"
+ "github.com/pkg/errors"
+ "k8s.io/minikube/pkg/minikube/driver"
+ "k8s.io/minikube/pkg/provision"
 )

 // Machine contains information about a machine
@@ -74,3 +80,31 @@ func LoadMachine(name string) (*Machine, error) {
 }
 return &mm, nil
 }
+
+// provisionDockerMachine provides fast provisioning of a docker machine
+func provisionDockerMachine(h *host.Host) error {
+ glog.Infof("provisioning docker machine ...")
+ start := time.Now()
+ defer func() {
+  glog.Infof("provisioned docker machine in %s", time.Since(start))
+ }()
+
+ p, err := fastDetectProvisioner(h)
+ if err != nil {
+  return errors.Wrap(err, "fast detect")
+ }
+ return p.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions)
+}
+
+// fastDetectProvisioner provides a shortcut for provisioner detection
+func fastDetectProvisioner(h *host.Host) (libprovision.Provisioner, error) {
+ d := h.Driver.DriverName()
+ switch {
+ case driver.IsKIC(d):
+  return provision.NewUbuntuProvisioner(h.Driver), nil
+ case driver.BareMetal(d):
+  return libprovision.DetectProvisioner(h.Driver)
+ default:
+  return provision.NewBuildrootProvisioner(h.Driver), nil
+ }
+}
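Editor's note: libmachine's DetectProvisioner typically probes the guest over SSH before choosing a provisioner; keying off the driver name skips those round-trips for drivers minikube already knows, with bare-metal as the only remaining fallback to detection. A usage sketch, assuming h is a created or repaired *host.Host:

    // Sketch: provisioning right after host creation or repair.
    if err := provisionDockerMachine(h); err != nil {
        return errors.Wrap(err, "provision")
    }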
@@ -32,6 +32,7 @@ import (
 "github.com/juju/mutex"
 "github.com/pkg/errors"
 "github.com/spf13/viper"
+"golang.org/x/crypto/ssh"
 "k8s.io/minikube/pkg/drivers/kic/oci"
 "k8s.io/minikube/pkg/minikube/command"
 "k8s.io/minikube/pkg/minikube/config"
@@ -43,6 +44,7 @@ import (
 "k8s.io/minikube/pkg/minikube/sshutil"
 "k8s.io/minikube/pkg/minikube/vmpath"
 "k8s.io/minikube/pkg/util/lock"
+"k8s.io/minikube/pkg/util/retry"
 )

 var (
@@ -195,6 +197,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error {
 }

+glog.Infof("creating required directories: %v", requiredDirectories)

 r, err := commandRunner(h)
 if err != nil {
  return errors.Wrap(err, "command runner")
@@ -233,11 +236,11 @@ func commandRunner(h *host.Host) (command.Runner, error) {
 }

 glog.Infof("Creating SSH client and returning SSHRunner for %q driver", d)
-client, err := sshutil.NewSSHClient(h.Driver)
-if err != nil {
- return nil, errors.Wrap(err, "ssh client")
-}
-return command.NewSSHRunner(client), nil
+
+// Retry in order to survive an ssh restart, which sometimes happens due to provisioning
+var sc *ssh.Client
+getSSH := func() (err error) {
+ sc, err = sshutil.NewSSHClient(h.Driver)
+ return err
+}
+
+if err := retry.Expo(getSSH, 250*time.Millisecond, 2*time.Second); err != nil {
+ return nil, err
+}
+
+return command.NewSSHRunner(sc), nil
 }

 // acquireMachinesLock protects against code that is not parallel-safe (libmachine, cert setup)
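Editor's note: provisioning can restart sshd mid-flight, so the first dial may fail transiently. retry.Expo, as used here (callback, initial interval, total time budget), retries with exponential backoff. A generic illustration of the same pattern; the target address is hypothetical:

    // Generic sketch of the retry pattern; values mirror the diff.
    dial := func() error {
        _, err := net.DialTimeout("tcp", "127.0.0.1:22", time.Second)
        return err
    }
    if err := retry.Expo(dial, 250*time.Millisecond, 2*time.Second); err != nil {
        glog.Errorf("ssh never came back: %v", err)
    }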
@@ -17,10 +17,11 @@ limitations under the License.
 package node

 import (
-"errors"
 "fmt"

+"github.com/pkg/errors"
 "github.com/spf13/viper"

 "k8s.io/minikube/pkg/minikube/config"
 "k8s.io/minikube/pkg/minikube/driver"
 "k8s.io/minikube/pkg/minikube/machine"
@@ -34,12 +35,11 @@ const (

 // Add adds a new node config to an existing cluster.
 func Add(cc *config.ClusterConfig, n config.Node) error {
-
- err := config.SaveNode(cc, &n)
- if err != nil {
-  return err
+ if err := config.SaveNode(cc, &n); err != nil {
+  return errors.Wrap(err, "save node")
 }

 // TODO: Start should return an error rather than calling exit!
 Start(*cc, n, nil, false)
 return nil
 }
@@ -48,7 +48,7 @@ func Add(cc *config.ClusterConfig, n config.Node) error {
 func Delete(cc config.ClusterConfig, name string) error {
 n, index, err := Retrieve(&cc, name)
 if err != nil {
-  return err
+  return errors.Wrap(err, "retrieve")
 }

 api, err := machine.NewAPIClient()
@@ -112,7 +112,7 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
 bs = setupKubeAdm(machineAPI, cc, n)
 err = bs.StartCluster(cc)
 if err != nil {
-  exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, mRunner))
+  exit.WithLogEntries("Error starting cluster", err, logs.FindProblems(cr, bs, cc, mRunner))
 }
 } else {
 bs, err = cluster.Bootstrapper(machineAPI, viper.GetString(cmdcfg.Bootstrapper), cc, n)
@@ -21,7 +21,6 @@ package integration
 import (
 "context"
 "crypto/md5"
-"encoding/json"
 "fmt"
 "io/ioutil"
 "os"
@@ -33,145 +32,119 @@ import (
 "time"

 "k8s.io/minikube/pkg/minikube/bootstrapper/images"
-"k8s.io/minikube/pkg/minikube/config"
 "k8s.io/minikube/pkg/minikube/constants"
 "k8s.io/minikube/pkg/minikube/download"
 "k8s.io/minikube/pkg/minikube/localpath"
 )

 func TestDownloadOnly(t *testing.T) {
-profile := UniqueProfileName("download")
-ctx, cancel := context.WithTimeout(context.Background(), Minutes(15))
-defer Cleanup(t, profile, cancel)
-
-// Stores the startup run result for later error messages
-var rrr *RunResult
-var err error
-t.Run("group", func(t *testing.T) {
- versions := []string{
-  constants.OldestKubernetesVersion,
-  constants.DefaultKubernetesVersion,
-  constants.NewestKubernetesVersion,
- }
- for _, v := range versions {
-  t.Run(v, func(t *testing.T) {
-   // Explicitly does not pass StartArgs() to test driver default
-   // --force to avoid uid check
-   args := append([]string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr", fmt.Sprintf("--kubernetes-version=%s", v)}, StartArgs()...)
-
-   // Preserve the initial run-result for debugging
-   if rrr == nil {
-    rrr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
-   } else {
-    _, err = Run(t, exec.CommandContext(ctx, Target(), args...))
-   }
-
-   if err != nil {
-    t.Errorf("%s failed: %v", args, err)
-   }
-
-   if download.PreloadExists(v, "docker") {
-    // Just make sure the tarball path exists
-    if _, err := os.Stat(download.TarballPath(v)); err != nil {
-     t.Errorf("preloaded tarball path doesn't exist: %v", err)
-    }
-    return
-   }
-
-   imgs, err := images.Kubeadm("", v)
-   if err != nil {
-    t.Errorf("kubeadm images: %v %+v", v, err)
-   }
-
-   // skip verify for cache images if --driver=none
-   if !NoneDriver() {
-    for _, img := range imgs {
-     img = strings.Replace(img, ":", "_", 1) // for example kube-scheduler:v1.15.2 --> kube-scheduler_v1.15.2
-     fp := filepath.Join(localpath.MiniPath(), "cache", "images", img)
-     _, err := os.Stat(fp)
-     if err != nil {
-      t.Errorf("expected image file exist at %q but got error: %v", fp, err)
-     }
-    }
-   }
-
-   // checking binaries downloaded (kubelet,kubeadm)
-   for _, bin := range constants.KubernetesReleaseBinaries {
-    fp := filepath.Join(localpath.MiniPath(), "cache", "linux", v, bin)
-    _, err := os.Stat(fp)
-    if err != nil {
-     t.Errorf("expected the file for binary exist at %q but got error %v", fp, err)
-    }
-   }
-
-   // If we are on darwin/windows, check to make sure OS specific kubectl has been downloaded
-   // as well for the `minikube kubectl` command
-   if runtime.GOOS == "linux" {
-    return
-   }
-   binary := "kubectl"
-   if runtime.GOOS == "windows" {
-    binary = "kubectl.exe"
-   }
-   fp := filepath.Join(localpath.MiniPath(), "cache", runtime.GOOS, v, binary)
-   if _, err := os.Stat(fp); err != nil {
-    t.Errorf("expected the file for binary exist at %q but got error %v", fp, err)
-   }
-  })
- }
-
- // This is a weird place to test profile deletion, but this test is serial, and we have a profile to delete!
- t.Run("DeleteAll", func(t *testing.T) {
-  if !CanCleanup() {
-   t.Skip("skipping, as cleanup is disabled")
-  }
-  rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all"))
-  if err != nil {
-   t.Errorf("%s failed: %v", rr.Args, err)
-  }
- })
- // Delete should always succeed, even if previously partially or fully deleted.
- t.Run("DeleteAlwaysSucceeds", func(t *testing.T) {
-  if !CanCleanup() {
-   t.Skip("skipping, as cleanup is disabled")
-  }
-  rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
-  if err != nil {
-   t.Errorf("%s failed: %v", rr.Args, err)
-  }
- })
-})
-
-// Check that the profile we've created has the expected driver
-t.Run("ExpectedDefaultDriver", func(t *testing.T) {
- if ExpectedDefaultDriver() == "" {
-  t.Skipf("--expected-default-driver is unset, skipping test")
-  return
- }
- rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
- if err != nil {
-  t.Errorf("%s failed: %v", rr.Args, err)
- }
- var ps map[string][]config.Profile
- err = json.Unmarshal(rr.Stdout.Bytes(), &ps)
- if err != nil {
-  t.Errorf("%s failed: %v", rr.Args, err)
- }
-
- got := ""
- for _, p := range ps["valid"] {
-  if p.Name == profile {
-   got = p.Config.Driver
-  }
- }
-
- if got != ExpectedDefaultDriver() {
-  t.Errorf("got driver %q, expected %q\nstart output: %s", got, ExpectedDefaultDriver(), rrr.Output())
- }
-})
+for _, r := range []string{"crio", "docker", "containerd"} {
+ t.Run(r, func(t *testing.T) {
+  // Stores the startup run result for later error messages
+  var rrr *RunResult
+  var err error
+  profile := UniqueProfileName(r)
+  ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
+  defer Cleanup(t, profile, cancel)
+
+  versions := []string{
+   constants.OldestKubernetesVersion,
+   constants.DefaultKubernetesVersion,
+   constants.NewestKubernetesVersion,
+  }
+
+  for _, v := range versions {
+   t.Run(v, func(t *testing.T) {
+    // Explicitly does not pass StartArgs() to test driver default
+    // --force to avoid uid check
+    args := append([]string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr", fmt.Sprintf("--kubernetes-version=%s", v), fmt.Sprintf("--container-runtime=%s", r)}, StartArgs()...)
+
+    // Preserve the initial run-result for debugging
+    if rrr == nil {
+     rrr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
+    } else {
+     _, err = Run(t, exec.CommandContext(ctx, Target(), args...))
+    }
+
+    if err != nil {
+     t.Errorf("%s failed: %v", args, err)
+    }
+
+    if download.PreloadExists(v, r) {
+     // Just make sure the tarball path exists
+     if _, err := os.Stat(download.TarballPath(v)); err != nil {
+      t.Errorf("preloaded tarball path doesn't exist: %v", err)
+     }
+     return
+    }
+
+    imgs, err := images.Kubeadm("", v)
+    if err != nil {
+     t.Errorf("kubeadm images: %v %+v", v, err)
+    }
+
+    // skip verify for cache images if --driver=none
+    if !NoneDriver() {
+     for _, img := range imgs {
+      img = strings.Replace(img, ":", "_", 1) // for example kube-scheduler:v1.15.2 --> kube-scheduler_v1.15.2
+      fp := filepath.Join(localpath.MiniPath(), "cache", "images", img)
+      _, err := os.Stat(fp)
+      if err != nil {
+       t.Errorf("expected image file exist at %q but got error: %v", fp, err)
+      }
+     }
+    }
+
+    // checking binaries downloaded (kubelet,kubeadm)
+    for _, bin := range constants.KubernetesReleaseBinaries {
+     fp := filepath.Join(localpath.MiniPath(), "cache", "linux", v, bin)
+     _, err := os.Stat(fp)
+     if err != nil {
+      t.Errorf("expected the file for binary exist at %q but got error %v", fp, err)
+     }
+    }
+
+    // If we are on darwin/windows, check to make sure OS specific kubectl has been downloaded
+    // as well for the `minikube kubectl` command
+    if runtime.GOOS == "linux" {
+     return
+    }
+    binary := "kubectl"
+    if runtime.GOOS == "windows" {
+     binary = "kubectl.exe"
+    }
+    fp := filepath.Join(localpath.MiniPath(), "cache", runtime.GOOS, v, binary)
+    if _, err := os.Stat(fp); err != nil {
+     t.Errorf("expected the file for binary exist at %q but got error %v", fp, err)
+    }
+   })
+  }
+
+  // This is a weird place to test profile deletion, but this test is serial, and we have a profile to delete!
+  t.Run("DeleteAll", func(t *testing.T) {
+   if !CanCleanup() {
+    t.Skip("skipping, as cleanup is disabled")
+   }
+   rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all"))
+   if err != nil {
+    t.Errorf("%s failed: %v", rr.Args, err)
+   }
+  })
+  // Delete should always succeed, even if previously partially or fully deleted.
+  t.Run("DeleteAlwaysSucceeds", func(t *testing.T) {
+   if !CanCleanup() {
+    t.Skip("skipping, as cleanup is disabled")
+   }
+   rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
+   if err != nil {
+    t.Errorf("%s failed: %v", rr.Args, err)
+   }
+  })
+ })
+}
 }

 func TestDownloadOnlyDocker(t *testing.T) {
 if !runningDockerDriver(StartArgs()) {
  t.Skip("this test only runs with the docker driver")
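Editor's note: the restructure replaces one shared profile with a profile per container runtime, so preload and cache checks now run once for each of crio, docker, and containerd. The subtest matrix follows the usual Go table-driven pattern, sketched here:

    // Illustration of the per-runtime subtest matrix the test now uses.
    for _, r := range []string{"crio", "docker", "containerd"} {
        r := r // capture the loop variable for the closure
        t.Run(r, func(t *testing.T) {
            // isolated profile, timeout, and cleanup per runtime
        })
    }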
@@ -27,7 +27,6 @@ import (
 // General configuration: used to set the VM Driver
 var startArgs = flag.String("minikube-start-args", "", "Arguments to pass to minikube start")
-var defaultDriver = flag.String("expected-default-driver", "", "Expected default driver")

 // Flags for faster local integration testing
 var forceProfile = flag.String("profile", "", "force tests to run against a particular profile")
@@ -69,11 +68,6 @@ func HyperVDriver() bool {
 return strings.Contains(*startArgs, "--driver=hyperv")
 }

-// ExpectedDefaultDriver returns the expected default driver, if any
-func ExpectedDefaultDriver() string {
- return *defaultDriver
-}
-
 // CanCleanup returns if cleanup is allowed
 func CanCleanup() bool {
 return *cleanup
@@ -260,12 +260,14 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version
 if err != nil {
  t.Errorf("images unmarshal: %v", err)
 }
-gotImages := []string{}
+found := map[string]bool{}
 for _, img := range jv["images"] {
  for _, i := range img.Tags {
+  // Remove container-specific prefixes for naming consistency
+  i = strings.TrimPrefix(i, "docker.io/")
+  i = strings.TrimPrefix(i, "localhost/")
   if defaultImage(i) {
-   // Remove docker.io for naming consistency between container runtimes
-   gotImages = append(gotImages, strings.TrimPrefix(i, "docker.io/"))
+   found[i] = true
   } else {
    t.Logf("Found non-minikube image: %s", i)
   }
@@ -275,6 +277,10 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version
 if err != nil {
  t.Errorf("kubeadm images: %v", version)
 }
+gotImages := []string{}
+for k := range found {
+ gotImages = append(gotImages, k)
+}
 sort.Strings(want)
 sort.Strings(gotImages)
 if diff := cmp.Diff(want, gotImages); diff != "" {
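Editor's note: collecting tags into a map instead of a slice deduplicates images that different runtimes report under different prefixes once docker.io/ and localhost/ are trimmed. A small illustration:

    // Illustration: map-as-set dedup after prefix normalization.
    found := map[string]bool{}
    for _, tag := range []string{"docker.io/kube-proxy:v1.18.0", "localhost/kube-proxy:v1.18.0"} {
        tag = strings.TrimPrefix(tag, "docker.io/")
        tag = strings.TrimPrefix(tag, "localhost/")
        found[tag] = true // both normalize to "kube-proxy:v1.18.0", stored once
    }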