From e053e4f235a7dfc88b0964372b9bc00b14447496 Mon Sep 17 00:00:00 2001 From: vikkyomkar Date: Tue, 17 Mar 2020 13:21:05 +0530 Subject: [PATCH 01/63] Add --vm flag for users who want to autoselect only VM's --- cmd/minikube/cmd/start.go | 3 ++- pkg/minikube/driver/driver.go | 27 ++++++++++++++++++++------- pkg/minikube/driver/driver_test.go | 2 +- pkg/minikube/registry/global.go | 3 --- 4 files changed, 23 insertions(+), 12 deletions(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 44e3d3210c..fea525383b 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -194,6 +194,7 @@ func initDriverFlags() { startCmd.Flags().String("driver", "", fmt.Sprintf("Driver is one of: %v (defaults to auto-detect)", driver.DisplaySupportedDrivers())) startCmd.Flags().String("vm-driver", "", "DEPRECATED, use `driver` instead.") startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors") + startCmd.Flags().Bool("vm", false, "Filter to use only VM Drivers") // kvm2 startCmd.Flags().String(kvmNetwork, "default", "The KVM network name. (kvm2 driver only)") @@ -465,7 +466,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState { return ds } - pick, alts := driver.Suggest(driver.Choices()) + pick, alts := driver.Suggest(driver.Choices(viper.GetBool("vm"))) if pick.Name == "" { exit.WithCodeT(exit.Config, "Unable to determine a default driver to use. 
Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/") } diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index 2dce6350cd..e5ea05b00a 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -164,14 +164,27 @@ func FlagDefaults(name string) FlagHints { } // Choices returns a list of drivers which are possible on this system -func Choices() []registry.DriverState { +func Choices(vm bool) []registry.DriverState { options := registry.Available() - - // Descending priority for predictability and appearance - sort.Slice(options, func(i, j int) bool { - return options[i].Priority > options[j].Priority - }) - return options + if vm { + var vmOptions []registry.DriverState + for _, ds := range options { + if IsVM(ds.Name) { + vmOptions = append(vmOptions,ds) + } + } + // Descending priority for predictability and appearance + sort.Slice(vmOptions, func(i, j int) bool { + return vmOptions[i].Priority > vmOptions[j].Priority + }) + return vmOptions + }else { + // Descending priority for predictability and appearance + sort.Slice(options, func(i, j int) bool { + return options[i].Priority > options[j].Priority + }) + return options + } } // Suggest returns a suggested driver from a set of options diff --git a/pkg/minikube/driver/driver_test.go b/pkg/minikube/driver/driver_test.go index 8f9be829ad..5d0bfd4009 100644 --- a/pkg/minikube/driver/driver_test.go +++ b/pkg/minikube/driver/driver_test.go @@ -162,7 +162,7 @@ func TestSuggest(t *testing.T) { } } - got := Choices() + got := Choices(false) gotNames := []string{} for _, c := range got { gotNames = append(gotNames, c.Name) diff --git a/pkg/minikube/registry/global.go b/pkg/minikube/registry/global.go index 301f61cb9f..d53168b48d 100644 --- a/pkg/minikube/registry/global.go +++ b/pkg/minikube/registry/global.go @@ -1,12 +1,9 @@ /* Copyright 2018 The Kubernetes Authors All rights reserved. 
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. From 3216a03c00ee60b93f1102e5ad62a0cb31b1a150 Mon Sep 17 00:00:00 2001 From: vikkyomkar Date: Tue, 17 Mar 2020 13:22:24 +0530 Subject: [PATCH 02/63] Add --vm flag for users who want to autoselect only VM's --- pkg/minikube/registry/global.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/minikube/registry/global.go b/pkg/minikube/registry/global.go index d53168b48d..301f61cb9f 100644 --- a/pkg/minikube/registry/global.go +++ b/pkg/minikube/registry/global.go @@ -1,9 +1,12 @@ /* Copyright 2018 The Kubernetes Authors All rights reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
From 37b23cfd6d02dd9f461f9c66054cc01d1759cebc Mon Sep 17 00:00:00 2001 From: vikkyomkar Date: Tue, 17 Mar 2020 13:49:22 +0530 Subject: [PATCH 03/63] Add --vm flag for users who want to autoselect only VM's --- pkg/minikube/driver/driver.go | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index e5ea05b00a..b790af5c9b 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -165,26 +165,22 @@ func FlagDefaults(name string) FlagHints { // Choices returns a list of drivers which are possible on this system func Choices(vm bool) []registry.DriverState { + var drivers []registry.DriverState options := registry.Available() if vm { - var vmOptions []registry.DriverState for _, ds := range options { if IsVM(ds.Name) { - vmOptions = append(vmOptions,ds) + drivers = append(drivers, ds) } } - // Descending priority for predictability and appearance - sort.Slice(vmOptions, func(i, j int) bool { - return vmOptions[i].Priority > vmOptions[j].Priority - }) - return vmOptions - }else { - // Descending priority for predictability and appearance - sort.Slice(options, func(i, j int) bool { - return options[i].Priority > options[j].Priority - }) - return options + } else { + drivers = options } + // Descending priority for predictability and appearance + sort.Slice(options, func(i, j int) bool { + return options[i].Priority > options[j].Priority + }) + return drivers } // Suggest returns a suggested driver from a set of options From 6ee7e6fa6736d0f514da3c00341429a34b27b24f Mon Sep 17 00:00:00 2001 From: vikkyomkar Date: Sun, 22 Mar 2020 19:25:03 +0530 Subject: [PATCH 04/63] updated as per suggestion --- pkg/minikube/driver/driver.go | 15 ++-------- pkg/minikube/registry/global.go | 44 ++++++++++++++++++++++++++-- pkg/minikube/registry/global_test.go | 2 +- 3 files changed, 46 insertions(+), 15 deletions(-) diff --git a/pkg/minikube/driver/driver.go 
b/pkg/minikube/driver/driver.go index b790af5c9b..efbfac95cd 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -165,22 +165,13 @@ func FlagDefaults(name string) FlagHints { // Choices returns a list of drivers which are possible on this system func Choices(vm bool) []registry.DriverState { - var drivers []registry.DriverState - options := registry.Available() - if vm { - for _, ds := range options { - if IsVM(ds.Name) { - drivers = append(drivers, ds) - } - } - } else { - drivers = options - } + options := registry.Available(vm) + // Descending priority for predictability and appearance sort.Slice(options, func(i, j int) bool { return options[i].Priority > options[j].Priority }) - return drivers + return options } // Suggest returns a suggested driver from a set of options diff --git a/pkg/minikube/registry/global.go b/pkg/minikube/registry/global.go index 301f61cb9f..16ede79a27 100644 --- a/pkg/minikube/registry/global.go +++ b/pkg/minikube/registry/global.go @@ -24,6 +24,40 @@ import ( "github.com/golang/glog" ) +const ( + // Podman is Kubernetes in container using podman driver + Podman = "podman" + // Docker is Kubernetes in container using docker driver + Docker = "docker" + // Mock driver + Mock = "mock" + // None driver + None = "none" +) + +// IsKIC checks if the driver is a kubernetes in container +func IsKIC(name string) bool { + return name == Docker || name == Podman +} + +// IsMock checks if the driver is a mock +func IsMock(name string) bool { + return name == Mock +} + +// IsVM checks if the driver is a VM +func IsVM(name string) bool { + if IsKIC(name) || IsMock(name) || BareMetal(name) { + return false + } + return true +} + +// BareMetal returns if this driver is unisolated +func BareMetal(name string) bool { + return name == None || name == Mock +} + var ( // globalRegistry is a globally accessible driver registry globalRegistry = newRegistry() @@ -59,7 +93,7 @@ func Driver(name string) DriverDef { } // Available 
returns a list of available drivers in the global registry -func Available() []DriverState { +func Available(vm bool) []DriverState { sts := []DriverState{} glog.Infof("Querying for installed drivers using PATH=%s", os.Getenv("PATH")) @@ -76,7 +110,13 @@ func Available() []DriverState { priority = Unhealthy } - sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s}) + if vm { + if IsVM(d.Name) { + sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s}) + } + } else { + sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s}) + } } // Descending priority for predictability diff --git a/pkg/minikube/registry/global_test.go b/pkg/minikube/registry/global_test.go index dbc76b6d51..9cb01a1e35 100644 --- a/pkg/minikube/registry/global_test.go +++ b/pkg/minikube/registry/global_test.go @@ -102,7 +102,7 @@ func TestGlobalAvailable(t *testing.T) { }, } - if diff := cmp.Diff(Available(), expected); diff != "" { + if diff := cmp.Diff(Available(false), expected); diff != "" { t.Errorf("available mismatch (-want +got):\n%s", diff) } } From 31dec90cc890faaad2b5db7d7075fa331f9c5fa4 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Mon, 23 Mar 2020 22:45:22 +0000 Subject: [PATCH 05/63] formatting --- .github/workflows/main.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a2daefe32d..92259e9e85 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -88,7 +88,7 @@ jobs: docker info || true docker version || true docker ps || true - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq @@ -155,7 +155,7 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 needs: [build_minikube] steps: - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq @@ -228,7 +228,7 @@ jobs: SHELL: "/bin/bash" # To prevent 
https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 steps: - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq @@ -295,7 +295,7 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq @@ -362,12 +362,12 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: - - name: install lz4 + - name: Install lz4 shell: bash run: | sudo apt-get update -qq sudo apt-get -qq -y install liblz4-tool - - name: install podman + - name: Install podman shell: bash run: | . /etc/os-release From 972ff3cc00099ec52cb15c0594c767d66560e842 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Tue, 24 Mar 2020 12:21:27 -0700 Subject: [PATCH 06/63] Update DefaultKubernetesVersion to v1.18.0 --- pkg/minikube/bootstrapper/bsutil/kubelet_test.go | 8 ++++---- pkg/minikube/constants/constants.go | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go index 052b9937dd..660b8e5b91 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet_test.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet_test.go @@ -81,7 +81,7 @@ Wants=crio.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 
--pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -107,7 +107,7 @@ Wants=containerd.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 
--pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -140,7 +140,7 @@ Wants=containerd.service [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, @@ -167,7 +167,7 @@ Wants=docker.socket [Service] ExecStart= -ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 
--pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.2 --pod-manifest-path=/etc/kubernetes/manifests +ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.2 --pod-manifest-path=/etc/kubernetes/manifests [Install] `, diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index 33ae576f48..8b181152a7 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -26,9 +26,9 @@ import ( const ( // DefaultKubernetesVersion is the default kubernetes version - DefaultKubernetesVersion = "v1.18.0-rc.1" + DefaultKubernetesVersion = "v1.18.0" // NewestKubernetesVersion is the newest Kubernetes version to test against - NewestKubernetesVersion = "v1.18.0-rc.1" + NewestKubernetesVersion = "v1.18.0" // OldestKubernetesVersion is the oldest Kubernetes version to test against OldestKubernetesVersion = "v1.11.10" // DefaultClusterName is the default nane for the k8s cluster From 28672dec786c49417879e3159e3100018db2a5fd Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 24 Mar 2020 14:43:11 -0700 Subject: [PATCH 07/63] remove ClusterNameFromMachine --- cmd/minikube/cmd/status.go | 12 +++++++----- pkg/minikube/driver/driver.go | 10 +--------- pkg/provision/buildroot.go | 4 +++- pkg/provision/provision.go | 8 +++++--- pkg/provision/ubuntu.go | 2 +- 5 files changed, 17 insertions(+), 19 deletions(-) diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 0db9e57ea8..7b44488e1c 100644 --- a/cmd/minikube/cmd/status.go +++ 
b/cmd/minikube/cmd/status.go @@ -31,6 +31,7 @@ import ( "github.com/spf13/cobra" "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify" "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" @@ -107,7 +108,7 @@ var statusCmd = &cobra.Command{ for _, n := range cc.Nodes { glog.Infof("checking status of %s ...", n.Name) machineName := driver.MachineName(*cc, n) - st, err = status(api, machineName, n.ControlPlane) + st, err = status(api, *cc, n) glog.Infof("%s status: %+v", machineName, st) if err != nil { @@ -150,12 +151,12 @@ func exitCode(st *Status) int { return c } -func status(api libmachine.API, name string, controlPlane bool) (*Status, error) { +func status(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) { - profile, node := driver.ClusterNameFromMachine(name) + controlPlane := n.ControlPlane st := &Status{ - Name: node, + Name: n.Name, Host: Nonexistent, APIServer: Nonexistent, Kubelet: Nonexistent, @@ -163,6 +164,7 @@ func status(api libmachine.API, name string, controlPlane bool) (*Status, error) Worker: !controlPlane, } + name := driver.MachineName(cc, n) hs, err := machine.Status(api, name) glog.Infof("%s host status = %q (err=%v)", name, hs, err) if err != nil { @@ -205,7 +207,7 @@ func status(api libmachine.API, name string, controlPlane bool) (*Status, error) } if st.Kubeconfig != Irrelevant { - ok, err := kubeconfig.IsClusterInConfig(ip, profile) + ok, err := kubeconfig.IsClusterInConfig(ip, cc.Name) glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err) if ok { st.Kubeconfig = Configured diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index b6106474d8..e064f70799 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -234,13 +234,5 @@ func MachineName(cc config.ClusterConfig, n config.Node) string { if 
len(cc.Nodes) == 1 || n.ControlPlane { return cc.Name } - return fmt.Sprintf("%s---%s", cc.Name, n.Name) -} - -// ClusterNameFromMachine retrieves the cluster name embedded in the machine name -func ClusterNameFromMachine(name string) (string, string) { - if strings.Contains(name, "---") { - return strings.Split(name, "---")[0], strings.Split(name, "---")[1] - } - return name, name + return fmt.Sprintf("%s-%s", cc.Name, n.Name) } diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 913d8b34d0..1a9e49338f 100644 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -29,6 +29,8 @@ import ( "github.com/docker/machine/libmachine/provision/pkgaction" "github.com/docker/machine/libmachine/swarm" "github.com/golang/glog" + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/util/retry" ) @@ -180,7 +182,7 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil { + if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), viper.GetString(config.ProfileName), p); err != nil { glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 0a297d50d8..4f7aed6cc0 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -36,10 +36,10 @@ import ( "github.com/docker/machine/libmachine/swarm" "github.com/golang/glog" "github.com/pkg/errors" + "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/sshutil" ) @@ -76,6 +76,9 @@ func NewSystemdProvisioner(osReleaseID string, d drivers.Driver) provision.Syste DaemonOptionsFile: 
"/etc/systemd/system/docker.service.d/10-machine.conf", OsReleaseID: osReleaseID, Driver: d, + SwarmOptions: swarm.Options{ + ArbitraryFlags: []string{viper.GetString(config.ProfileName)}, + }, }, } } @@ -208,8 +211,7 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options { return authOptions } -func setContainerRuntimeOptions(name string, p miniProvisioner) error { - cluster, _ := driver.ClusterNameFromMachine(name) +func setContainerRuntimeOptions(name string, cluster string, p miniProvisioner) error { c, err := config.Load(cluster) if err != nil { return errors.Wrap(err, "getting cluster config") diff --git a/pkg/provision/ubuntu.go b/pkg/provision/ubuntu.go index 9d2b272bd2..c0aaf46780 100644 --- a/pkg/provision/ubuntu.go +++ b/pkg/provision/ubuntu.go @@ -185,7 +185,7 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil { + if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.SwarmOptions.ArbitraryFlags[0], p); err != nil { glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } From 4a24cfcf30ee267507b74d022b3bf1e2ffc5c072 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 24 Mar 2020 15:54:39 -0700 Subject: [PATCH 08/63] add cluster name to provisioner object directly --- pkg/provision/buildroot.go | 4 +++- pkg/provision/provision.go | 4 ---- pkg/provision/ubuntu.go | 5 ++++- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 1a9e49338f..723e24c72f 100644 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -37,12 +37,14 @@ import ( // BuildrootProvisioner provisions the custom system based on Buildroot type BuildrootProvisioner struct { provision.SystemdProvisioner + clusterName string } // NewBuildrootProvisioner creates a new 
BuildrootProvisioner func NewBuildrootProvisioner(d drivers.Driver) provision.Provisioner { return &BuildrootProvisioner{ NewSystemdProvisioner("buildroot", d), + viper.GetString(config.ProfileName), } } @@ -182,7 +184,7 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), viper.GetString(config.ProfileName), p); err != nil { + if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.clusterName, p); err != nil { glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 4f7aed6cc0..19e6f0c43a 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -36,7 +36,6 @@ import ( "github.com/docker/machine/libmachine/swarm" "github.com/golang/glog" "github.com/pkg/errors" - "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" @@ -76,9 +75,6 @@ func NewSystemdProvisioner(osReleaseID string, d drivers.Driver) provision.Syste DaemonOptionsFile: "/etc/systemd/system/docker.service.d/10-machine.conf", OsReleaseID: osReleaseID, Driver: d, - SwarmOptions: swarm.Options{ - ArbitraryFlags: []string{viper.GetString(config.ProfileName)}, - }, }, } } diff --git a/pkg/provision/ubuntu.go b/pkg/provision/ubuntu.go index c0aaf46780..2fc32e67a3 100644 --- a/pkg/provision/ubuntu.go +++ b/pkg/provision/ubuntu.go @@ -29,6 +29,8 @@ import ( "github.com/docker/machine/libmachine/provision/pkgaction" "github.com/docker/machine/libmachine/swarm" "github.com/golang/glog" + "github.com/spf13/viper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/util/retry" ) @@ -42,6 +44,7 @@ func NewUbuntuProvisioner(d drivers.Driver) provision.Provisioner { return &UbuntuProvisioner{ BuildrootProvisioner{ 
NewSystemdProvisioner("ubuntu", d), + viper.GetString(config.ProfileName), }, } } @@ -185,7 +188,7 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.SwarmOptions.ArbitraryFlags[0], p); err != nil { + if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.clusterName, p); err != nil { glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } From 06dca4555357990d14e27e4b70066fda5232e9e8 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Tue, 24 Mar 2020 16:10:20 -0700 Subject: [PATCH 09/63] lint --- pkg/provision/buildroot.go | 2 +- pkg/provision/provision.go | 4 ++-- pkg/provision/ubuntu.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 723e24c72f..2f630f9332 100644 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -184,7 +184,7 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.clusterName, p); err != nil { + if err := setContainerRuntimeOptions(p.clusterName, p); err != nil { glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } diff --git a/pkg/provision/provision.go b/pkg/provision/provision.go index 19e6f0c43a..7b2e9e6539 100644 --- a/pkg/provision/provision.go +++ b/pkg/provision/provision.go @@ -207,8 +207,8 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options { return authOptions } -func setContainerRuntimeOptions(name string, cluster string, p miniProvisioner) error { - c, err := config.Load(cluster) +func setContainerRuntimeOptions(name string, p miniProvisioner) error { + c, err := config.Load(name) if err != nil { return 
errors.Wrap(err, "getting cluster config") } diff --git a/pkg/provision/ubuntu.go b/pkg/provision/ubuntu.go index 2fc32e67a3..3fbf006b69 100644 --- a/pkg/provision/ubuntu.go +++ b/pkg/provision/ubuntu.go @@ -188,7 +188,7 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au } glog.Infof("setting minikube options for container-runtime") - if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p.clusterName, p); err != nil { + if err := setContainerRuntimeOptions(p.clusterName, p); err != nil { glog.Infof("Error setting container-runtime options during provisioning %v", err) return err } From 4383a4883c8a5050097c9568fa7a1740ea152a4d Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 07:28:23 -0700 Subject: [PATCH 10/63] Improve host recreation experience --- pkg/minikube/machine/delete.go | 33 +++++++++++++++++++++++ pkg/minikube/machine/fix.go | 48 ++++++++++++++-------------------- pkg/minikube/out/style.go | 1 - pkg/minikube/out/style_enum.go | 1 - 4 files changed, 52 insertions(+), 31 deletions(-) diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index c51a9d0931..136d232ac4 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -22,11 +22,13 @@ import ( "time" "github.com/docker/machine/libmachine" + "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/mcnerror" "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/minikube/pkg/drivers/kic/oci" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/out" ) @@ -100,3 +102,34 @@ func DeleteHost(api libmachine.API, machineName string) error { } return nil } + +// destroy demolishes a host by any means necessary +// Use only when the machine state appears to be inconsistent +func destroy(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) { + machineName 
:= driver.MachineName(cc, n) + glog.Infof("destroying %s ...", machineName) + + // First try using the official friendly API's. + + // This will probably fail + err := StopHost(api, machineName) + if err != nil { + glog.Infof("stophost failed: %v", err) + } + + // For 95% of cases, this should be enough + err = DeleteHost(api, machineName) + if err != nil { + glog.Warningf("deletehost failed: %v", err) + } + + // DeleteHost may have returned success prematurely. Go further. + if err = h.Driver.Remove(); err != nil { + glog.Warningf("driver remove failed: %v", err) + } + + // Clean up the local files relating to this machine + if err = api.Remove(cc.Name); err != nil { + glog.Warningf("api remove failed: %v", err) + } +} diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 9575f92294..943bc7d8b0 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -54,8 +54,6 @@ var ( // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) { - out.T(out.Waiting, "Reconfiguring existing host ...") - start := time.Now() glog.Infof("fixHost starting: %s", n.Name) defer func() { @@ -101,43 +99,35 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. } func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) { + machineName := driver.MachineName(cc, n) + machineType := driver.MachineType(cc.Driver) + s, err := h.Driver.GetState() + glog.Infof("recreateIfNeeded on %s: state=%s err=%v", machineName, s, err) if err != nil || s == state.Stopped || s == state.None { // If virtual machine does not exist due to user interrupt cancel(i.e. 
Ctrl + C), recreate virtual machine me, err := machineExists(h.Driver.DriverName(), s, err) - if !me { - // If the error is that virtual machine does not exist error, handle error(recreate virtual machine) - if err == ErrorMachineNotExist { - // remove virtual machine - if err := h.Driver.Remove(); err != nil { - // skip returning error since it may be before docker image pulling(so, no host exist) - if h.Driver.DriverName() != driver.Docker { - return nil, errors.Wrap(err, "host remove") - } - } - // remove machine config directory - if err := api.Remove(cc.Name); err != nil { - return nil, errors.Wrap(err, "api remove") - } - // recreate virtual machine - out.T(out.Meh, "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.", out.V{"name": cc.Name}) - h, err = createHost(api, cc, n) - if err != nil { - return nil, errors.Wrap(err, "Error recreating VM") - } - // return ErrMachineNotExist err to initialize preExists flag - return h, ErrorMachineNotExist + glog.Infof("exists: %v err=%v", me, err) + + if !me || err == ErrorMachineNotExist { + out.T(out.Provisioning, `Recreating {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + destroy(api, cc, n, h) + time.Sleep(1 * time.Second) + h, err = createHost(api, cc, n) + if err != nil { + return nil, errors.Wrap(err, "recreate") + } + s, err = h.Driver.GetState() + if err != nil { + return nil, errors.Wrap(err, "recreated state") } - // If the error is not that virtual machine does not exist error, return error - return nil, errors.Wrap(err, "Error getting state for host") } } - machineType := driver.MachineType(cc.Driver) if s == state.Running { - out.T(out.Running, `Using the running {{.driver_name}} "{{.profile_name}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name, "machine_type": machineType}) + out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" 
{{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) } else { - out.T(out.Restarting, `Starting existing {{.driver_name}} {{.machine_type}} for "{{.profile_name}}" ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name, "machine_type": machineType}) + out.T(out.Restarting, `Retarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) if err := h.Driver.Start(); err != nil { return h, errors.Wrap(err, "driver start") } diff --git a/pkg/minikube/out/style.go b/pkg/minikube/out/style.go index 150a7af30d..2ae3f25304 100644 --- a/pkg/minikube/out/style.go +++ b/pkg/minikube/out/style.go @@ -60,7 +60,6 @@ var styles = map[StyleEnum]style{ Running: {Prefix: "🏃 "}, Provisioning: {Prefix: "🌱 "}, Restarting: {Prefix: "🔄 "}, - Reconfiguring: {Prefix: "📯 "}, Stopping: {Prefix: "✋ "}, Stopped: {Prefix: "🛑 "}, Warning: {Prefix: "❗ ", LowPrefix: lowWarning}, diff --git a/pkg/minikube/out/style_enum.go b/pkg/minikube/out/style_enum.go index 747c277faf..097f4452dc 100644 --- a/pkg/minikube/out/style_enum.go +++ b/pkg/minikube/out/style_enum.go @@ -32,7 +32,6 @@ const ( Running Provisioning Restarting - Reconfiguring Stopping Stopped Warning From d33684d7eb68e0684f0b35b32f683ceb03563526 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 08:10:08 -0700 Subject: [PATCH 11/63] Make private stop/delete functions to avoid wheel reinvention --- pkg/minikube/machine/delete.go | 31 +++++++++++------------- pkg/minikube/machine/fix.go | 43 ++++++++++++++++++++-------------- pkg/minikube/machine/stop.go | 17 +++++++++----- 3 files changed, 50 insertions(+), 41 deletions(-) diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index 136d232ac4..69b493ac26 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -87,11 +87,16 @@ func DeleteHost(api libmachine.API, 
machineName string) error { } out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName}) - if err := host.Driver.Remove(); err != nil { + return delete(api, host, machineName) +} + +// delete removes a host and it's local data files +func delete(api libmachine.API, h *host.Host, machineName string) error { + if err := h.Driver.Remove(); err != nil { glog.Warningf("remove failed, will retry: %v", err) time.Sleep(2 * time.Second) - nerr := host.Driver.Remove() + nerr := h.Driver.Remove() if nerr != nil { return errors.Wrap(nerr, "host remove retry") } @@ -103,18 +108,17 @@ func DeleteHost(api libmachine.API, machineName string) error { return nil } -// destroy demolishes a host by any means necessary -// Use only when the machine state appears to be inconsistent -func destroy(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) { +// demolish destroys a host by any means necessary - use only if state is inconsistent +func demolish(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) { machineName := driver.MachineName(cc, n) glog.Infof("destroying %s ...", machineName) - // First try using the official friendly API's. + // First try using the friendly API's. // This will probably fail - err := StopHost(api, machineName) + err := stop(h) if err != nil { - glog.Infof("stophost failed: %v", err) + glog.Infof("stophost failed (probably ok): %v", err) } // For 95% of cases, this should be enough @@ -123,13 +127,6 @@ func destroy(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host glog.Warningf("deletehost failed: %v", err) } - // DeleteHost may have returned success prematurely. Go further. 
- if err = h.Driver.Remove(); err != nil { - glog.Warningf("driver remove failed: %v", err) - } - - // Clean up the local files relating to this machine - if err = api.Remove(cc.Name); err != nil { - glog.Warningf("api remove failed: %v", err) - } + err = delete(api, h, machineName) + glog.Warningf("delete failed (probably ok) %v", err) } diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 943bc7d8b0..cc8e8e8271 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -101,41 +101,48 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) { machineName := driver.MachineName(cc, n) machineType := driver.MachineType(cc.Driver) + recreated := false + s, serr := h.Driver.GetState() - s, err := h.Driver.GetState() - glog.Infof("recreateIfNeeded on %s: state=%s err=%v", machineName, s, err) - if err != nil || s == state.Stopped || s == state.None { + glog.Infof("recreateIfNeeded on %s: state=%s err=%v", machineName, s, serr) + if serr != nil || s == state.Stopped || s == state.None { // If virtual machine does not exist due to user interrupt cancel(i.e. 
Ctrl + C), recreate virtual machine - me, err := machineExists(h.Driver.DriverName(), s, err) + me, err := machineExists(h.Driver.DriverName(), s, serr) glog.Infof("exists: %v err=%v", me, err) if !me || err == ErrorMachineNotExist { - out.T(out.Provisioning, `Recreating {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) - destroy(api, cc, n, h) + out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) + demolish(api, cc, n, h) time.Sleep(1 * time.Second) h, err = createHost(api, cc, n) if err != nil { return nil, errors.Wrap(err, "recreate") } - s, err = h.Driver.GetState() - if err != nil { - return nil, errors.Wrap(err, "recreated state") - } + recreated = true + s, serr = h.Driver.GetState() } } + if serr != ErrorMachineNotExist { + glog.Warningf("unexpected machine state, will restart: %v", serr) + } + if s == state.Running { - out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) - } else { - out.T(out.Restarting, `Retarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) - if err := h.Driver.Start(); err != nil { - return h, errors.Wrap(err, "driver start") - } - if err := api.Save(h); err != nil { - return h, errors.Wrap(err, "save") + if !recreated { + out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) } + return h, nil } + if !recreated { + out.T(out.Restarting, `Retarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, 
"machine_type": machineType}) + } + if err := h.Driver.Start(); err != nil { + return h, errors.Wrap(err, "driver start") + } + if err := api.Save(h); err != nil { + return h, errors.Wrap(err, "save") + } return h, nil } diff --git a/pkg/minikube/machine/stop.go b/pkg/minikube/machine/stop.go index 97931a2b00..5c4ba4e671 100644 --- a/pkg/minikube/machine/stop.go +++ b/pkg/minikube/machine/stop.go @@ -30,25 +30,30 @@ import ( // StopHost stops the host VM, saving state to disk. func StopHost(api libmachine.API, machineName string) error { - host, err := api.Load(machineName) + h, err := api.Load(machineName) if err != nil { return errors.Wrapf(err, "load") } - out.T(out.Stopping, `Stopping "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName}) - if host.DriverName == driver.HyperV { + out.T(out.Stopping, `Stopping "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": h.DriverName}) + return stop(h) +} + +// stop forcibly stops a host without needing to load +func stop(h *host.Host) error { + if h.DriverName == driver.HyperV { glog.Infof("As there are issues with stopping Hyper-V VMs using API, trying to shut down using SSH") - if err := trySSHPowerOff(host); err != nil { + if err := trySSHPowerOff(h); err != nil { return errors.Wrap(err, "ssh power off") } } - if err := host.Stop(); err != nil { + if err := h.Stop(); err != nil { alreadyInStateError, ok := err.(mcnerror.ErrHostAlreadyInState) if ok && alreadyInStateError.State == state.Stopped { return nil } - return &retry.RetriableError{Err: errors.Wrapf(err, "Stop: %s", machineName)} + return &retry.RetriableError{Err: errors.Wrap(err, "stop")} } return nil } From a8d63f7fa55423c81eb9947076257363eab09273 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 10:24:57 -0700 Subject: [PATCH 12/63] none: Skip checkHelmTiller if socat is not installed --- test/integration/addons_test.go | 8 ++++++++ 1 
file changed, 8 insertions(+) diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index a784f381bf..3e001cd072 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -267,9 +267,17 @@ func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) t.Fatalf("wait: %v", err) } + if NoneDriver() { + _, err := exec.LookPath("socat") + if err != nil { + t.Skipf("socat is required by kubectl to complete this test") + } + } + want := "Server: &version.Version" // Test from inside the cluster (`helm version` use pod.list permission. we use tiller serviceaccount in kube-system to list pod) checkHelmTiller := func() error { + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "helm-test", "--restart=Never", "--image=alpine/helm:2.16.3", "-it", "--namespace=kube-system", "--serviceaccount=tiller", "--", "version")) if err != nil { return err From 442a7869d3bf9d6e5689afcf5dbb6062c42392ef Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 12:48:51 -0700 Subject: [PATCH 13/63] Re-initalize failed Kubernetes clusters --- go.sum | 2 - pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 104 +++++++++++++------ 2 files changed, 75 insertions(+), 31 deletions(-) diff --git a/go.sum b/go.sum index d7d0bc3a61..15a7b48dae 100644 --- a/go.sum +++ b/go.sum @@ -421,8 +421,6 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0= -github.com/johanneswuerbach/nfsexports 
v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 5de297a003..9308439bdd 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -51,6 +51,7 @@ import ( "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/kubelet" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/vmpath" @@ -129,7 +130,7 @@ func (k *Bootstrapper) LogCommands(cfg config.ClusterConfig, o bootstrapper.LogO dmesg.WriteString(fmt.Sprintf(" | tail -n %d", o.Lines)) } - describeNodes := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s", + describeNodes := fmt.Sprintf("sudo %s describe nodes --kubeconfig=%s", path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"), path.Join(vmpath.GuestPersistentDir, "kubeconfig")) @@ -181,20 +182,7 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error { return nil } -// StartCluster starts the cluster -func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { - err := bsutil.ExistingConfig(k.c) - if err == nil { // if there is an existing cluster don't reconfigure it - return k.restartCluster(cfg) - } - glog.Infof("existence check: %v", err) - - start := time.Now() - glog.Infof("StartCluster: %+v", cfg) - defer func() { - glog.Infof("StartCluster complete in %s", time.Since(start)) - }() - +func (k 
*Bootstrapper) init(cfg config.ClusterConfig) error { version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion) if err != nil { return errors.Wrap(err, "parsing kubernetes version") @@ -237,10 +225,10 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } conf := bsutil.KubeadmYamlPath - c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && %s init --config %s %s --ignore-preflight-errors=%s", conf, conf, bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ","))) - rr, err := k.c.RunCmd(c) - if err != nil { - return errors.Wrapf(err, "init failed. output: %q", rr.Output()) + c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", + bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ","))) + if _, err := k.c.RunCmd(c); err != nil { + return errors.Wrap(err, "run") } if cfg.Driver == driver.Docker { @@ -258,10 +246,47 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { } if err := k.elevateKubeSystemPrivileges(cfg); err != nil { - glog.Warningf("unable to create cluster role binding, some addons might not work : %v. 
", err) + glog.Warningf("unable to create cluster role binding, some addons might not work: %v", err) + } + return nil +} + +// StartCluster starts the cluster +func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { + start := time.Now() + glog.Infof("StartCluster: %+v", cfg) + defer func() { + glog.Infof("StartCluster complete in %s", time.Since(start)) + }() + + if err := bsutil.ExistingConfig(k.c); err == nil { + glog.Infof("found existing configuration files, will attempt cluster restart") + rerr := k.restartCluster(cfg) + if rerr == nil { + return nil + } + out.T(out.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr}) + if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil { + glog.Warningf("delete failed: %v", err) + } + // Fall-through to init } - return nil + conf := bsutil.KubeadmYamlPath + if _, err := k.c.RunCmd(exec.Command("sudo", "cp", conf+".new", conf)); err != nil { + return errors.Wrap(err, "cp") + } + + err := k.init(cfg) + if err == nil { + return nil + } + + out.T(out.Conflict, "initialization failed, will try again: {{.error}}", out.V{"error": err}) + if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil { + glog.Warningf("delete failed: %v", err) + } + return k.init(cfg) } func (k *Bootstrapper) controlPlaneEndpoint(cfg config.ClusterConfig) (string, int, error) { @@ -410,8 +435,8 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "clearing stale configs") } - if _, err := k.c.RunCmd(exec.Command("sudo", "mv", conf+".new", conf)); err != nil { - return errors.Wrap(err, "mv") + if _, err := k.c.RunCmd(exec.Command("sudo", "cp", conf+".new", conf)); err != nil { + return errors.Wrap(err, "cp") } baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase) @@ -425,9 +450,9 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { glog.Infof("resetting cluster from %s", conf) // 
Run commands one at a time so that it is easier to root cause failures. for _, c := range cmds { - rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c)) + _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c)) if err != nil { - return errors.Wrapf(err, "running cmd: %s", rr.Command()) + return errors.Wrap(err, "run") } } @@ -504,11 +529,32 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error { cmd = fmt.Sprintf("%s reset", bsutil.InvokeKubeadm(k8s.KubernetesVersion)) } - if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)); err != nil { - return errors.Wrapf(err, "kubeadm reset: cmd: %q", rr.Command()) + rr, derr := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)) + if derr != nil { + glog.Warningf("%s: %v", rr.Command(), err) } - return nil + if err := kubelet.ForceStop(k.c); err != nil { + glog.Warningf("stop kubelet: %v", err) + } + + cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket}) + if err != nil { + return errors.Wrap(err, "runtime") + } + + containers, err := cr.ListContainers(cruntime.ListOptions{Namespaces: []string{"kube-system"}}) + if err != nil { + glog.Warningf("unable to list kube-system containers: %v", err) + } + if len(containers) > 0 { + glog.Warningf("found %d kube-system containers to stop", len(containers)) + if err := cr.StopContainers(containers); err != nil { + glog.Warningf("error stopping containers: %v", err) + } + } + + return derr } // SetupCerts sets up certificates within the cluster. 
@@ -619,7 +665,7 @@ func reloadKubelet(runner command.Runner) error { return nil } - startCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && sudo mv %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart kubelet", svc, svc, conf, conf)) + startCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cp %s.new %s && sudo cp %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart kubelet", svc, svc, conf, conf)) if _, err := runner.RunCmd(startCmd); err != nil { return errors.Wrap(err, "starting kubelet") } From d18cb6fe09ac2577749eaeb534e09f4c76281c7d Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 13:26:47 -0700 Subject: [PATCH 14/63] install socat --- .github/workflows/main.yml | 6 ++++-- hack/jenkins/linux_integration_tests_none.sh | 6 ++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 79a9f0982e..44df941563 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -233,7 +233,8 @@ jobs: runs-on: ubuntu-16.04 steps: # conntrack is required for kubernetes 1.18 and higher - - name: Install conntrack + # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon + - name: Install tools for none shell: bash run: | sudo apt-get update -qq @@ -303,7 +304,8 @@ jobs: runs-on: ubuntu-18.04 steps: # conntrack is required for kubernetes 1.18 and higher - - name: Install conntrack + # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon + - name: Install tools for none shell: bash run: | sudo apt-get update -qq diff --git a/hack/jenkins/linux_integration_tests_none.sh b/hack/jenkins/linux_integration_tests_none.sh index 9f45f2ad54..d6c99722c6 100755 --- a/hack/jenkins/linux_integration_tests_none.sh +++ b/hack/jenkins/linux_integration_tests_none.sh @@ -58,6 +58,12 @@ if ! 
conntrack --version &>/dev/null; then sudo apt-get -qq -y install conntrack fi + # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon +if ! which socat &>/dev/null; then + echo "WARNING: No socat is not installed" + sudo apt-get update -qq + sudo apt-get -qq -y install socat +fi mkdir -p cron && gsutil -m rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES" sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP" From 23b4ec9df342b16a35fb92ea79b2c14f51bfe793 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 13:28:45 -0700 Subject: [PATCH 15/63] install socat gh actions --- .github/workflows/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 44df941563..4a0bd5b0aa 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -239,6 +239,7 @@ jobs: run: | sudo apt-get update -qq sudo apt-get -qq -y install conntrack + sudo apt-get -qq -y install socat - name: Install gopogh shell: bash run: | @@ -310,6 +311,7 @@ jobs: run: | sudo apt-get update -qq sudo apt-get -qq -y install conntrack + sudo apt-get -qq -y install socat - name: Install gopogh shell: bash run: | From 093b7c29be4272cfaaa2781c1e445dfba6088bb8 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 13:30:08 -0700 Subject: [PATCH 16/63] improve comments --- hack/jenkins/linux_integration_tests_none.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/jenkins/linux_integration_tests_none.sh b/hack/jenkins/linux_integration_tests_none.sh index d6c99722c6..0549fde508 100755 --- a/hack/jenkins/linux_integration_tests_none.sh +++ b/hack/jenkins/linux_integration_tests_none.sh @@ -53,14 +53,14 @@ sudo systemctl is-active --quiet kubelet \ # conntrack is required for kubernetes 1.18 and higher for none driver if ! 
conntrack --version &>/dev/null; then - echo "WARNING: No contrack is not installed" + echo "WARNING: contrack is not installed. will try to install." sudo apt-get update -qq sudo apt-get -qq -y install conntrack fi # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon if ! which socat &>/dev/null; then - echo "WARNING: No socat is not installed" + echo "WARNING: socat is not installed. will try to install." sudo apt-get update -qq sudo apt-get -qq -y install socat fi From ea9aa72984bddfbf29b3a4573c0eed483dc62c0f Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 13:44:28 -0700 Subject: [PATCH 17/63] Retry addon application --- go.sum | 2 -- pkg/addons/addons.go | 17 +++++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/go.sum b/go.sum index d7d0bc3a61..15a7b48dae 100644 --- a/go.sum +++ b/go.sum @@ -421,8 +421,6 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= diff --git a/pkg/addons/addons.go 
b/pkg/addons/addons.go index d644aafc88..a3b1600a3d 100644 --- a/pkg/addons/addons.go +++ b/pkg/addons/addons.go @@ -34,6 +34,7 @@ import ( "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/storageclass" + "k8s.io/minikube/pkg/util/retry" ) // defaultStorageClassProvisioner is the name of the default storage class provisioner @@ -211,13 +212,17 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, } command := kubectlCommand(cc, deployFiles, enable) - glog.Infof("Running: %v", command) - rr, err := cmd.RunCmd(command) - if err != nil { - return errors.Wrapf(err, "addon apply") + + // Retry, because sometimes we race against an apiserver restart + apply := func() error { + _, err := cmd.RunCmd(command) + if err != nil { + glog.Warningf("apply failed, will retry: %v", err) + } + return err } - glog.Infof("output:\n%s", rr.Output()) - return nil + + return retry.Expo(apply, 1*time.Second, time.Second*30) } // enableOrDisableStorageClasses enables or disables storage classes From c3fba065a9e97d48e616885f405e9ab15dbbfcee Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 25 Mar 2020 14:07:39 -0700 Subject: [PATCH 18/63] do not override hostname if extraConfig is specified --- pkg/minikube/bootstrapper/bsutil/kubelet.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index c2180838a3..4068deac21 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -62,7 +62,7 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage extraOpts["node-ip"] = cp.IP } nodeName := KubeNodeName(mc, nc) - if nodeName != "" { + if _, ok := extraOpts["hostname-override"]; !ok { extraOpts["hostname-override"] = nodeName } From 6417e85f5b1d78865372230fba6b21f2fac2ac8e Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 
25 Mar 2020 14:40:40 -0700 Subject: [PATCH 19/63] fix ordering --- pkg/minikube/bootstrapper/bsutil/kubelet.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kubelet.go b/pkg/minikube/bootstrapper/bsutil/kubelet.go index 4068deac21..1ed22d17c6 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubelet.go +++ b/pkg/minikube/bootstrapper/bsutil/kubelet.go @@ -61,8 +61,8 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage if _, ok := extraOpts["node-ip"]; !ok { extraOpts["node-ip"] = cp.IP } - nodeName := KubeNodeName(mc, nc) if _, ok := extraOpts["hostname-override"]; !ok { + nodeName := KubeNodeName(mc, nc) extraOpts["hostname-override"] = nodeName } From f84569aab05a7a7d5e2a929c1045d5a3a9e425fd Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 14:46:44 -0700 Subject: [PATCH 20/63] Make crazy cluster_test / mock_driver combination pass tests --- cmd/minikube/cmd/stop.go | 7 ++++--- pkg/minikube/constants/constants.go | 4 ++++ pkg/minikube/machine/cluster_test.go | 7 ++++++- pkg/minikube/machine/delete.go | 6 ++---- pkg/minikube/machine/fix.go | 26 +++++++++++++------------- pkg/minikube/machine/stop.go | 11 +++++++++-- pkg/minikube/tests/driver_mock.go | 16 +++++++++++----- 7 files changed, 49 insertions(+), 28 deletions(-) diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index 6d79b0446f..1036a5d365 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -69,8 +69,9 @@ func runStop(cmd *cobra.Command, args []string) { func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool { nonexistent := false - stop := func() (err error) { - machineName := driver.MachineName(cluster, n) + machineName := driver.MachineName(cluster, n) + + tryStop := func() (err error) { err = machine.StopHost(api, machineName) if err == nil { return nil @@ -87,7 +88,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n 
config.Node) bool } } - if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil { + if err := retry.Expo(tryStop, 1*time.Second, 30*time.Second, 3); err != nil { exit.WithError("Unable to stop VM", err) } diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index 33ae576f48..af053eee32 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -17,6 +17,7 @@ limitations under the License. package constants import ( + "errors" "path/filepath" "k8s.io/client-go/tools/clientcmd" @@ -100,4 +101,7 @@ var ( "storage-gluster", "istio-operator", } + + // ErrMachineMissing is returned when virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C) + ErrMachineMissing = errors.New("machine does not exist") ) diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 0d0c16c64b..6d687d3825 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -17,11 +17,13 @@ limitations under the License. package machine import ( + "flag" "fmt" "testing" "time" // Driver used by testdata + "k8s.io/minikube/pkg/minikube/constants" _ "k8s.io/minikube/pkg/minikube/registry/drvs/virtualbox" "github.com/docker/machine/libmachine/drivers" @@ -41,6 +43,9 @@ func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, e } func RegisterMockDriver(t *testing.T) { + // Debugging this test is a nightmare. + flag.Lookup("logtostderr").Value.Set("true") + t.Helper() if !registry.Driver(driver.Mock).Empty() { return @@ -163,7 +168,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) { // This should pass with creating host, while machine does not exist. 
h, _, err = StartHost(api, mc, n) if err != nil { - if err != ErrorMachineNotExist { + if err != constants.ErrMachineMissing { t.Fatalf("Error starting host: %v", err) } } diff --git a/pkg/minikube/machine/delete.go b/pkg/minikube/machine/delete.go index 69b493ac26..2132d16737 100644 --- a/pkg/minikube/machine/delete.go +++ b/pkg/minikube/machine/delete.go @@ -94,7 +94,7 @@ func DeleteHost(api libmachine.API, machineName string) error { func delete(api libmachine.API, h *host.Host, machineName string) error { if err := h.Driver.Remove(); err != nil { glog.Warningf("remove failed, will retry: %v", err) - time.Sleep(2 * time.Second) + time.Sleep(1 * time.Second) nerr := h.Driver.Remove() if nerr != nil { @@ -111,9 +111,7 @@ func delete(api libmachine.API, h *host.Host, machineName string) error { // demolish destroys a host by any means necessary - use only if state is inconsistent func demolish(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) { machineName := driver.MachineName(cc, n) - glog.Infof("destroying %s ...", machineName) - - // First try using the friendly API's. + glog.Infof("DEMOLISHING %s ...", machineName) // This will probably fail err := stop(h) diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index cc8e8e8271..dd1f927a65 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -47,11 +47,6 @@ const ( maxClockDesyncSeconds = 2.1 ) -var ( - // ErrorMachineNotExist is returned when virtual machine does not exist due to user interrupt cancel(i.e. 
Ctrl + C) - ErrorMachineNotExist = errors.New("machine does not exist") -) - // fixHost fixes up a previously configured VM so that it is ready to run Kubernetes func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) { start := time.Now() @@ -109,21 +104,26 @@ func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node // If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine me, err := machineExists(h.Driver.DriverName(), s, serr) glog.Infof("exists: %v err=%v", me, err) + glog.Infof("%q vs %q", err, constants.ErrMachineMissing) - if !me || err == ErrorMachineNotExist { + if !me || err == constants.ErrMachineMissing { out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType}) demolish(api, cc, n, h) + + glog.Infof("Sleeping 1 second for extra luck!") time.Sleep(1 * time.Second) + h, err = createHost(api, cc, n) if err != nil { return nil, errors.Wrap(err, "recreate") } + recreated = true s, serr = h.Driver.GetState() } } - if serr != ErrorMachineNotExist { + if serr != constants.ErrMachineMissing { glog.Warningf("unexpected machine state, will restart: %v", serr) } @@ -219,7 +219,7 @@ func adjustGuestClock(h hostRunner, t time.Time) error { func machineExistsState(s state.State, err error) (bool, error) { if s == state.None { - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err } @@ -228,7 +228,7 @@ func machineExistsError(s state.State, err error, drverr error) (bool, error) { _ = s // not used if err == drverr { // if the error matches driver error - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err } @@ -236,7 +236,7 @@ func machineExistsError(s state.State, err error, drverr error) (bool, error) { func machineExistsMessage(s state.State, err 
error, msg string) (bool, error) { if s == state.None || (err != nil && err.Error() == msg) { // if the error contains the message - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err } @@ -244,10 +244,10 @@ func machineExistsMessage(s state.State, err error, msg string) (bool, error) { func machineExistsDocker(s state.State, err error) (bool, error) { if s == state.Error { // if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } else if s == state.None { // if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err } @@ -279,7 +279,7 @@ func machineExists(d string, s state.State, err error) (bool, error) { return machineExistsDocker(s, err) case driver.Mock: if s == state.Error { - return false, ErrorMachineNotExist + return false, constants.ErrMachineMissing } return true, err default: diff --git a/pkg/minikube/machine/stop.go b/pkg/minikube/machine/stop.go index 5c4ba4e671..fafe09e446 100644 --- a/pkg/minikube/machine/stop.go +++ b/pkg/minikube/machine/stop.go @@ -17,6 +17,8 @@ limitations under the License. package machine import ( + "time" + "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/mcnerror" @@ -30,6 +32,7 @@ import ( // StopHost stops the host VM, saving state to disk. 
func StopHost(api libmachine.API, machineName string) error { + glog.Infof("StopHost: %v", machineName) h, err := api.Load(machineName) if err != nil { return errors.Wrapf(err, "load") @@ -41,6 +44,7 @@ func StopHost(api libmachine.API, machineName string) error { // stop forcibly stops a host without needing to load func stop(h *host.Host) error { + start := time.Now() if h.DriverName == driver.HyperV { glog.Infof("As there are issues with stopping Hyper-V VMs using API, trying to shut down using SSH") if err := trySSHPowerOff(h); err != nil { @@ -49,12 +53,15 @@ func stop(h *host.Host) error { } if err := h.Stop(); err != nil { - alreadyInStateError, ok := err.(mcnerror.ErrHostAlreadyInState) - if ok && alreadyInStateError.State == state.Stopped { + glog.Infof("stop err: %v", err) + st, ok := err.(mcnerror.ErrHostAlreadyInState) + if ok && st.State == state.Stopped { + glog.Infof("host is already stopped") return nil } return &retry.RetriableError{Err: errors.Wrap(err, "stop")} } + glog.Infof("stop complete within %s", time.Since(start)) return nil } diff --git a/pkg/minikube/tests/driver_mock.go b/pkg/minikube/tests/driver_mock.go index 14d5b2f59d..2b9dff6ad0 100644 --- a/pkg/minikube/tests/driver_mock.go +++ b/pkg/minikube/tests/driver_mock.go @@ -17,6 +17,7 @@ limitations under the License. 
package tests import ( + "runtime" "testing" "github.com/docker/machine/libmachine/drivers" @@ -24,6 +25,7 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/constants" ) // MockDriver is a struct used to mock out libmachine.Driver @@ -96,11 +98,14 @@ func (d *MockDriver) GetSSHKeyPath() string { // GetState returns the state of the driver func (d *MockDriver) GetState() (state.State, error) { - d.Logf("MockDriver.GetState: %v", d.CurrentState) - if d.NotExistError { + _, file, no, _ := runtime.Caller(2) + d.Logf("MockDriver.GetState called from %s#%d: returning %q", file, no, d.CurrentState) + + // NOTE: this logic is questionable + if d.NotExistError && d.CurrentState != state.Stopped && d.CurrentState != state.None { d.CurrentState = state.Error - // don't use cluster.ErrorMachineNotExist to avoid import cycle - return d.CurrentState, errors.New("machine does not exist") + d.Logf("mock NotExistError set, setting state=%s err=%v", d.CurrentState, constants.ErrMachineMissing) + return d.CurrentState, constants.ErrMachineMissing } return d.CurrentState, nil } @@ -123,12 +128,13 @@ func (d *MockDriver) Remove() error { if d.RemoveError { return errors.New("error deleting machine") } + d.NotExistError = false return nil } // Restart restarts the machine func (d *MockDriver) Restart() error { - d.Logf("MockDriver.Restart") + d.Logf("MockDriver.Restart, setting CurrentState=%s", state.Running) d.CurrentState = state.Running return nil } From 1ee125e68852ab592a9e7a26c954feafc59853e3 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 14:50:13 -0700 Subject: [PATCH 21/63] lint err --- go.sum | 2 -- pkg/minikube/machine/cluster_test.go | 4 +++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.sum b/go.sum index d7d0bc3a61..15a7b48dae 100644 --- a/go.sum +++ b/go.sum @@ -421,8 +421,6 @@ github.com/jmespath/go-jmespath 
v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0= -github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0= github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= diff --git a/pkg/minikube/machine/cluster_test.go b/pkg/minikube/machine/cluster_test.go index 6d687d3825..01c99de92e 100644 --- a/pkg/minikube/machine/cluster_test.go +++ b/pkg/minikube/machine/cluster_test.go @@ -44,7 +44,9 @@ func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, e func RegisterMockDriver(t *testing.T) { // Debugging this test is a nightmare. 
- flag.Lookup("logtostderr").Value.Set("true") + if err := flag.Lookup("logtostderr").Value.Set("true"); err != nil { + t.Logf("unable to set logtostderr: %v", err) + } t.Helper() if !registry.Driver(driver.Mock).Empty() { From 9597fcc1342414b9150faa0d9a971b6c06b1e41b Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 14:52:51 -0700 Subject: [PATCH 22/63] validateMinikubeKubectl: Add --context to kubectl call to pass profile name --- test/integration/functional_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 5111590fe6..ba0af809d2 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -233,7 +233,7 @@ func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) { // validateMinikubeKubectl validates that the `minikube kubectl` command returns content func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) { - kubectlArgs := []string{"kubectl", "--", "get", "pods"} + kubectlArgs := []string{"kubectl", "--", "--context", profile, "get", "pods"} rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...)) if err != nil { t.Fatalf("%s failed: %v", rr.Args, err) From 9089296f2ee805341abd8d2468ec51838fa19315 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 14:57:27 -0700 Subject: [PATCH 23/63] validateMinikubeKubectl: Add -p to minikube call to get Kubernetes version --- test/integration/functional_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index ba0af809d2..63b2a28c9c 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -233,7 +233,8 @@ func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) { // validateMinikubeKubectl validates that the `minikube kubectl` command 
returns content func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) { - kubectlArgs := []string{"kubectl", "--", "--context", profile, "get", "pods"} + // Must set the profile so that it knows what version of Kubernetes to use + kubectlArgs := []string{"-p", profile, "kubectl", "--", "--context", profile, "get", "pods"} rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...)) if err != nil { t.Fatalf("%s failed: %v", rr.Args, err) From fd1897ddaecaf7fe9b1e508a620e699e2a45e090 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Wed, 25 Mar 2020 15:01:57 -0700 Subject: [PATCH 24/63] replace emoji with spacing issues --- pkg/minikube/out/style.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/minikube/out/style.go b/pkg/minikube/out/style.go index 150a7af30d..bcae27ec87 100644 --- a/pkg/minikube/out/style.go +++ b/pkg/minikube/out/style.go @@ -92,7 +92,7 @@ var styles = map[StyleEnum]style{ Caching: {Prefix: "🤹 "}, StartingVM: {Prefix: "🔥 "}, StartingNone: {Prefix: "🤹 "}, - Provisioner: {Prefix: "ℹ️ "}, + Provisioner: {Prefix: "ℹ️ "}, Resetting: {Prefix: "🔄 "}, DeletingHost: {Prefix: "🔥 "}, Copying: {Prefix: "✨ "}, @@ -117,7 +117,7 @@ var styles = map[StyleEnum]style{ Unmount: {Prefix: "🔥 "}, MountOptions: {Prefix: "💾 "}, Fileserver: {Prefix: "🚀 ", OmitNewline: true}, - DryRun: {Prefix: "🏜️ "}, + DryRun: {Prefix: "🌵 "}, AddonEnable: {Prefix: "🌟 "}, AddonDisable: {Prefix: "🌑 "}, } From f52df44fd5dcd5eaadfe709b249dc4d12ea26eca Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Wed, 25 Mar 2020 16:43:35 -0700 Subject: [PATCH 25/63] If preload fails on hot restart, log warning instead of erroring out --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 5de297a003..6091024d60 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ 
b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -531,7 +531,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error { } if err := r.Preload(cfg.KubernetesConfig); err != nil { - return errors.Wrap(err, "preloading") + glog.Infof("preloading failed, will try to load cached images: %v", err) } if cfg.KubernetesConfig.ShouldLoadCachedImages { From dc3b842d4164dd96d4c9192833c22b769bd3f2f8 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 25 Mar 2020 17:11:22 -0700 Subject: [PATCH 26/63] Avoid provision.Detector when possible --- pkg/minikube/machine/fix.go | 23 +++++++++++++---------- pkg/minikube/machine/info.go | 22 +++++++++++----------- pkg/minikube/machine/start.go | 2 +- 3 files changed, 25 insertions(+), 22 deletions(-) diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 9575f92294..05cbb8eec5 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -67,21 +67,24 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.") } + driverName := h.Driver.DriverName() + // check if need to re-run docker-env - maybeWarnAboutEvalEnv(cc.Driver, cc.Name) + maybeWarnAboutEvalEnv(driverName, cc.Name) h, err = recreateIfNeeded(api, cc, n, h) if err != nil { return h, err } - // Technically, we should only have to call provision if Docker has changed, - // but who can predict what shape the existing VM is in. 
- e := engineOptions(cc) - h.HostOptions.EngineOptions.Env = e.Env - err = provisionDockerMachine(h) - if err != nil { - return h, errors.Wrap(err, "provision") + if !driver.BareMetal(h.Driver.DriverName()) { + glog.Infof("%s is local, skipping re-provision as it requires SSH", driverName) + e := engineOptions(cc) + h.HostOptions.EngineOptions.Env = e.Env + err = provisionDockerMachine(h) + if err != nil { + return h, errors.Wrap(err, "provision") + } } if driver.IsMock(h.DriverName) { @@ -93,11 +96,11 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. } if driver.BareMetal(h.Driver.DriverName()) { - glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName()) + glog.Infof("%s is local, skipping auth/time setup (requires ssh)", driverName) return h, nil } - return h, ensureSyncedGuestClock(h, cc.Driver) + return h, ensureSyncedGuestClock(h, driverName) } func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) { diff --git a/pkg/minikube/machine/info.go b/pkg/minikube/machine/info.go index 1bae7253e9..c3b4e06569 100644 --- a/pkg/minikube/machine/info.go +++ b/pkg/minikube/machine/info.go @@ -18,13 +18,14 @@ package machine import ( "io/ioutil" + "os/exec" - "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/provision" "github.com/golang/glog" "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/disk" "github.com/shirou/gopsutil/mem" + "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/out" ) @@ -80,18 +81,17 @@ func showLocalOsRelease() { } // logRemoteOsRelease shows systemd information about the current linux distribution, on the remote VM -func logRemoteOsRelease(drv drivers.Driver) { - provisioner, err := provision.DetectProvisioner(drv) +func logRemoteOsRelease(r command.Runner) { + rr, err := r.RunCmd(exec.Command("cat", "/etc/os-release")) if err != nil { - 
glog.Errorf("DetectProvisioner: %v", err) + glog.Infof("remote release failed: %v", err) + } + + osReleaseInfo, err := provision.NewOsRelease(rr.Stdout.Bytes()) + if err != nil { + glog.Errorf("NewOsRelease: %v", err) return } - osReleaseInfo, err := provisioner.GetOsReleaseInfo() - if err != nil { - glog.Errorf("GetOsReleaseInfo: %v", err) - return - } - - glog.Infof("Provisioned with %s", osReleaseInfo.PrettyName) + glog.Infof("Remote host: %s", osReleaseInfo.PrettyName) } diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index c41b285a2f..e42fc9cf62 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -212,7 +212,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error { showLocalOsRelease() } if driver.IsVM(mc.Driver) { - logRemoteOsRelease(h.Driver) + logRemoteOsRelease(r) } return syncLocalAssets(r) } From cc0262ef55cbae4e0403943c7ae6b0068be2cb80 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 19:58:38 -0700 Subject: [PATCH 27/63] improve logging and fix some not logged errors --- test/integration/aaa_download_only_test.go | 18 +++--- test/integration/addons_test.go | 68 +++++++++++----------- test/integration/functional_test.go | 36 ++++++------ test/integration/version_upgrade_test.go | 4 +- 4 files changed, 63 insertions(+), 63 deletions(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 8be373a5dd..cf6239cd9c 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -67,7 +67,7 @@ func TestDownloadOnly(t *testing.T) { } if err != nil { - t.Errorf("%s failed: %v", args, err) + t.Errorf("failed to download only. args: %q %v", args, err) } // skip for none, as none driver does not have preload feature. 
@@ -75,14 +75,14 @@ func TestDownloadOnly(t *testing.T) { if download.PreloadExists(v, r) { // Just make sure the tarball path exists if _, err := os.Stat(download.TarballPath(v)); err != nil { - t.Errorf("preloaded tarball path doesn't exist: %v", err) + t.Errorf("failed to verify preloaded tarball file exists: %v", err) } return } } imgs, err := images.Kubeadm("", v) if err != nil { - t.Errorf("kubeadm images: %v %+v", v, err) + t.Errorf("failed to get kubeadm images for %v: %+v", v, err) } // skip verify for cache images if --driver=none @@ -129,7 +129,7 @@ func TestDownloadOnly(t *testing.T) { } rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to delete all. args: %q : %v", rr.Args, err) } }) // Delete should always succeed, even if previously partially or fully deleted. @@ -139,7 +139,7 @@ func TestDownloadOnly(t *testing.T) { } rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to delete. args: %q: %v", rr.Args, err) } }) }) @@ -158,22 +158,22 @@ func TestDownloadOnlyKic(t *testing.T) { args = append(args, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v:\n%s", args, err, rr.Output()) + t.Errorf("start with download only failed %q : %v:\n%s", args, err) } // Make sure the downloaded image tarball exists tarball := download.TarballPath(constants.DefaultKubernetesVersion) contents, err := ioutil.ReadFile(tarball) if err != nil { - t.Errorf("reading tarball: %v", err) + t.Errorf("failed to read tarball file %q: %v", tarball, err) } // Make sure it has the correct checksum checksum := md5.Sum(contents) remoteChecksum, err := ioutil.ReadFile(download.PreloadChecksumPath(constants.DefaultKubernetesVersion)) if err != nil { - t.Errorf("reading checksum file: %v", err) + t.Errorf("failed to read checksum file %q : %v", download.PreloadChecksumPath(constants.DefaultKubernetesVersion), err) } if string(remoteChecksum) != string(checksum[:]) { - t.Errorf("checksum of %s does not match remote checksum (%s != %s)", tarball, string(remoteChecksum), string(checksum[:])) + t.Errorf("failed to verify checksum. checksum of %q does not match remote checksum (%q != %q)", tarball, string(remoteChecksum), string(checksum[:])) } } diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 3e001cd072..43ad112651 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -69,15 +69,15 @@ func TestAddons(t *testing.T) { // Assert that disable/enable works offline rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to stop minikube. 
args %q : %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to enable dashboard addon: args %q : %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "disable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable dashboard addon: args %q : %v", rr.Args, err) } } @@ -88,30 +88,30 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("kubernetes client: %v", client) + t.Fatalf("failed to get kubernetes client: %v", client) } if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "nginx-ingress-controller", Minutes(6)); err != nil { - t.Errorf("waiting for ingress-controller deployment to stabilize: %v", err) + t.Errorf("failed waiting for ingress-controller deployment to stabilize: %v", err) } if _, err := PodWait(ctx, t, profile, "kube-system", "app.kubernetes.io/name=nginx-ingress-controller", Minutes(12)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for nginx-ingress-controller : %v", err) } rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-ing.yaml"))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to kubectl replace nginx-ing. args %q. %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-pod-svc.yaml"))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to kubectl replace nginx-pod-svc. args %q. 
%v", rr.Args, err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx", Minutes(4)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for nginx pod: %v", err) } if err := kapi.WaitForService(client, "default", "nginx", true, time.Millisecond*500, Minutes(10)); err != nil { - t.Errorf("Error waiting for nginx service to be up") + t.Errorf("failed waiting for nginx service to be up: %v", err) } want := "Welcome to nginx!" @@ -121,7 +121,7 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { return err } if rr.Stderr.String() != "" { - t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr) + t.Logf("%v: unexpected stderr: %s (may be temporary)", rr.Args, rr.Stderr) } if !strings.Contains(rr.Stdout.String(), want) { return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want) @@ -130,32 +130,32 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { } if err := retry.Expo(checkIngress, 500*time.Millisecond, Seconds(90)); err != nil { - t.Errorf("ingress never responded as expected on 127.0.0.1:80: %v", err) + t.Errorf("failed to get response from nginx ingress on 127.0.0.1:80: %v", err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "ingress", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable ingress addon. 
args %q : %v", rr.Args, err) } } func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("kubernetes client: %v", client) + t.Fatalf("failed to get kubernetes client for %s : %v", profile, err) } start := time.Now() if err := kapi.WaitForRCToStabilize(client, "kube-system", "registry", Minutes(6)); err != nil { - t.Errorf("waiting for registry replicacontroller to stabilize: %v", err) + t.Errorf("failed waiting for registry replicacontroller to stabilize: %v", err) } t.Logf("registry stabilized in %s", time.Since(start)) if _, err := PodWait(ctx, t, profile, "kube-system", "actual-registry=true", Minutes(6)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for pod actual-registry: %v", err) } if _, err := PodWait(ctx, t, profile, "kube-system", "registry-proxy=true", Minutes(10)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for pod registry-proxy: %v", err) } // Test from inside the cluster (no curl available on busybox) @@ -166,20 +166,20 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "registry-test", "--restart=Never", "--image=busybox", "-it", "--", "sh", "-c", "wget --spider -S http://registry.kube-system.svc.cluster.local")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to hit registry.kube-system.svc.cluster.local. args %q failed: %v", rr.Args, err) } want := "HTTP/1.1 200" if !strings.Contains(rr.Stdout.String(), want) { - t.Errorf("curl = %q, want *%s*", rr.Stdout.String(), want) + t.Errorf("expected curl response be %q, but got *%s*", want, rr.Stdout.String()) } // Test from outside the cluster rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ip")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed run minikube ip. 
args %q : %v", rr.Args, err) } if rr.Stderr.String() != "" { - t.Errorf("%s: unexpected stderr: %s", rr.Args, rr.Stderr) + t.Errorf("expected stderr to be -empty- but got: *%q*", rr.Args, rr.Stderr) } endpoint := fmt.Sprintf("http://%s:%d", strings.TrimSpace(rr.Stdout.String()), 5000) @@ -199,30 +199,30 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { return nil } - if err := retry.Expo(checkExternalAccess, 500*time.Millisecond, Minutes(2)); err != nil { - t.Errorf(err.Error()) + if err := retry.Expo(checkExternalAccess, 500*time.Millisecond, Seconds(150)); err != nil { + t.Errorf("failed to check external access to %s: %v", u.String(), err.Error()) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "registry", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable registry addon. args %q: %v", rr.Args, err) } } func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("kubernetes client: %v", client) + t.Fatalf("failed to get kubernetes client for %s: %v", profile, err) } start := time.Now() if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "metrics-server", Minutes(6)); err != nil { - t.Errorf("waiting for metrics-server deployment to stabilize: %v", err) + t.Errorf("failed waiting for metrics-server deployment to stabilize: %v", err) } t.Logf("metrics-server stabilized in %s", time.Since(start)) if _, err := PodWait(ctx, t, profile, "kube-system", "k8s-app=metrics-server", Minutes(6)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for k8s-app=metrics-server pod: %v", err) } want := "CPU(cores)" @@ -242,29 +242,29 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin // metrics-server takes some time to be able to collect metrics if err := retry.Expo(checkMetricsServer, 
time.Second*3, Minutes(6)); err != nil { - t.Errorf(err.Error()) + t.Errorf("failed checking metric server: %v", err.Error()) } rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "metrics-server", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to disable metrics-server addon: args %q: %v", rr.Args, err) } } func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("kubernetes client: %v", client) + t.Fatalf("failed to get kubernetes client for %s: %v", profile, err) } start := time.Now() if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "tiller-deploy", Minutes(6)); err != nil { - t.Errorf("waiting for tiller-deploy deployment to stabilize: %v", err) + t.Errorf("failed waiting for tiller-deploy deployment to stabilize: %v", err) } t.Logf("tiller-deploy stabilized in %s", time.Since(start)) if _, err := PodWait(ctx, t, profile, "kube-system", "app=helm", Minutes(6)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for helm pod: %v", err) } if NoneDriver() { @@ -292,11 +292,11 @@ func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) } if err := retry.Expo(checkHelmTiller, 500*time.Millisecond, Minutes(2)); err != nil { - t.Errorf(err.Error()) + t.Errorf("failed checking helm tiller: %v", err.Error()) } rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "helm-tiller", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed disabling helm-tiller addon. 
arg %q.s %v", rr.Args, err) } } diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 63b2a28c9c..127ec9dd99 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -270,40 +270,40 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string) func validateStatusCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%q failed: %v", rr.Args, err) } // Custom format rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-f", "host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%q failed: %v", rr.Args, err) } match, _ := regexp.MatchString(`host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)`, rr.Stdout.String()) if !match { - t.Errorf("%s failed: %v. Output for custom format did not match", rr.Args, err) + t.Errorf("%q failed: %v. Output for custom format did not match", rr.Args, err) } // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-o", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%q failed: %v", rr.Args, err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%q failed: %v", rr.Args, err) } if _, ok := jsonObject["Host"]; !ok { - t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Host") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Host") } if _, ok := jsonObject["Kubelet"]; !ok { - t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Kubelet") + t.Errorf("%q failed: %v. 
Missing key %s in json object", rr.Args, err, "Kubelet") } if _, ok := jsonObject["APIServer"]; !ok { - t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "APIServer") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "APIServer") } if _, ok := jsonObject["Kubeconfig"]; !ok { - t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Kubeconfig") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Kubeconfig") } } @@ -312,7 +312,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { args := []string{"dashboard", "--url", "-p", profile, "--alsologtostderr", "-v=1"} ss, err := Start(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", args, err) + t.Errorf("%q failed: %v", args, err) } defer func() { ss.Stop(t) @@ -334,7 +334,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { resp, err := retryablehttp.Get(u.String()) if err != nil { - t.Errorf("failed get: %v", err) + t.Errorf("failed to http get %q : %v", u.String(), err) } if resp.StatusCode != http.StatusOK { body, err := ioutil.ReadAll(resp.Body) @@ -349,12 +349,12 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { func validateDNS(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%q failed: %v", rr.Args, err) } names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)) if err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for busybox pod : %v", err) } nslookup := func() error { @@ -364,12 +364,12 @@ func validateDNS(ctx context.Context, t *testing.T, profile string) { // If the coredns process was stable, this retry wouldn't be 
necessary. if err = retry.Expo(nslookup, 1*time.Second, Minutes(1)); err != nil { - t.Errorf("nslookup failing: %v", err) + t.Errorf("failed to do nslookup on kubernetes.default: %v", err) } want := []byte("10.96.0.1") if !bytes.Contains(rr.Stdout.Bytes(), want) { - t.Errorf("nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want) + t.Errorf("failed nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want) } } @@ -454,17 +454,17 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { // make sure the image is deleted. rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img)) if err == nil { - t.Errorf("expected the image be deleted and get error but got nil error ! cmd: %q", rr.Command()) + t.Errorf("expected an error. because image should not exist. but got *nil error* ! cmd: %q", rr.Command()) } // minikube cache reload. rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "reload")) if err != nil { - t.Errorf("expected %q to run successfully but got error %v", rr.Command(), err) + t.Errorf("expected %q to run successfully but got error: %v", rr.Command(), err) } // make sure 'cache reload' brought back the manually deleted image. rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img)) if err != nil { - t.Errorf("expected to get no error for %q but got %v", rr.Command(), err) + t.Errorf("expected %q to run successfully but got error: %v", rr.Command(), err) } }) diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go index a03676a335..20d131d5ef 100644 --- a/test/integration/version_upgrade_test.go +++ b/test/integration/version_upgrade_test.go @@ -97,7 +97,7 @@ func TestVersionUpgrade(t *testing.T) { args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) 
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to start minikube HEAD with newest k8s version. args: %s : %v", rr.Args, err) } s, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "version", "--output=json")) @@ -127,6 +127,6 @@ func TestVersionUpgrade(t *testing.T) { args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("start and already started minikube failed. args: %q : %v", rr.Args, err) } } From a30ec2574cd9f6a7f596f3a1017b81275967cb91 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 20:18:05 -0700 Subject: [PATCH 28/63] improve logs for fn_mount_cmd tests' --- test/integration/docker_test.go | 10 +++++----- test/integration/fn_mount_cmd.go | 30 +++++++++++++++--------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/test/integration/docker_test.go b/test/integration/docker_test.go index 05075fe579..da26300dd5 100644 --- a/test/integration/docker_test.go +++ b/test/integration/docker_test.go @@ -39,27 +39,27 @@ func TestDockerFlags(t *testing.T) { args := append([]string{"start", "-p", profile, "--cache-images=false", "--memory=1800", "--install-addons=false", "--wait=false", "--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", "--docker-opt=icc=true", "--alsologtostderr", "-v=5"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to start minikube with args: %q : %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=Environment --no-pager")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to 'systemctl show docker' inside minikube. args %q: %v", rr.Args, err) } for _, envVar := range []string{"FOO=BAR", "BAZ=BAT"} { if !strings.Contains(rr.Stdout.String(), envVar) { - t.Errorf("env var %s missing: %s.", envVar, rr.Stdout) + t.Errorf("expected env key/value %q to be passed to minikube's docker and be included in: *%q*.", envVar, rr.Stdout) } } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=ExecStart --no-pager")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed on the second 'systemctl show docker' inside minikube. args %q: %v", rr.Args, err) } for _, opt := range []string{"--debug", "--icc=true"} { if !strings.Contains(rr.Stdout.String(), opt) { - t.Fatalf("%s = %q, want *%s*", rr.Command(), rr.Stdout, opt) + t.Fatalf("expected %q output to have include *%s* . 
output: %q", rr.Command(), opt, rr.Stdout) } } } diff --git a/test/integration/fn_mount_cmd.go b/test/integration/fn_mount_cmd.go index 8a3a9f68ee..6e292af109 100644 --- a/test/integration/fn_mount_cmd.go +++ b/test/integration/fn_mount_cmd.go @@ -66,10 +66,10 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { defer func() { if t.Failed() { - t.Logf("%s failed, getting debug info...", t.Name()) + t.Logf("%q failed, getting debug info...", t.Name()) rr, err := Run(t, exec.Command(Target(), "-p", profile, "ssh", "mount | grep 9p; ls -la /mount-9p; cat /mount-9p/pod-dates")) if err != nil { - t.Logf("%s: %v", rr.Command(), err) + t.Logf("debugging command %q failed : %v", rr.Command(), err) } else { t.Logf("(debug) %s:\n%s", rr.Command(), rr.Stdout) } @@ -78,7 +78,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Cleanup in advance of future tests rr, err := Run(t, exec.Command(Target(), "-p", profile, "ssh", "sudo umount -f /mount-9p")) if err != nil { - t.Logf("%s: %v", rr.Command(), err) + t.Logf("%q: %v", rr.Command(), err) } ss.Stop(t) cancel() @@ -117,7 +117,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Assert that we can access the mount without an error. Display for debugging. rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "--", "ls", "-la", guestMount)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed verifying accessing to the mount. 
args %q : %v", rr.Args, err) } t.Logf("guest mount directory contents\n%s", rr.Stdout) @@ -125,7 +125,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { tp := filepath.Join("/mount-9p", testMarker) rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "cat", tp)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to verify the mount contains unique test marked: args %q: %v", rr.Args, err) } if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) { @@ -136,28 +136,28 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Start the "busybox-mount" pod. rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox-mount-test.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to 'kubectl replace' for busybox-mount-test. args %q : %v", rr.Args, err) } if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox-mount", Minutes(4)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for busybox-mount pod: %v", err) } // Read the file written by pod startup p := filepath.Join(tempDir, createdByPod) got, err := ioutil.ReadFile(p) if err != nil { - t.Errorf("readfile %s: %v", p, err) + t.Errorf("failed to read file created by pod %q: %v", p, err) } wantFromPod := []byte("test\n") if !bytes.Equal(got, wantFromPod) { - t.Errorf("%s = %q, want %q", p, got, wantFromPod) + t.Errorf("the content of the file %q is %q, but want it to be: *%q*", p, got, wantFromPod) } // test that file written from host was read in by the pod via cat /mount-9p/written-by-host; rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "logs", "busybox-mount")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get kubectl logs for busybox-mount. 
args %q : %v", rr.Args, err) } if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) { t.Errorf("busybox-mount logs = %q, want %q", rr.Stdout.Bytes(), wantFromTest) @@ -169,27 +169,27 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // test that file written from host was read in by the pod via cat /mount-9p/fromhost; rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "stat", gp)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to stat the file %q iniside minikube : args %q: %v", gp, rr.Args, err) } if runtime.GOOS == "windows" { if strings.Contains(rr.Stdout.String(), "Access: 1970-01-01") { - t.Errorf("invalid access time: %v", rr.Stdout) + t.Errorf("expected to get valid access time but got: %q", rr.Stdout) } } if strings.Contains(rr.Stdout.String(), "Modify: 1970-01-01") { - t.Errorf("invalid modify time: %v", rr.Stdout) + t.Errorf("expected to get valid modify time but got: %q", rr.Stdout) } } p = filepath.Join(tempDir, createdByTestRemovedByPod) if _, err := os.Stat(p); err == nil { - t.Errorf("expected file %s to be removed", p) + t.Errorf("expected file %q to be removed but exists !", p) } p = filepath.Join(tempDir, createdByPodRemovedByTest) if err := os.Remove(p); err != nil { - t.Errorf("unexpected error removing file %s: %v", p, err) + t.Errorf("failed to remove file %q: %v", p, err) } } From 2ac46e8ba91754446b8a484d4086da7c55f70cd4 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 20:22:02 -0700 Subject: [PATCH 29/63] improve logs for fn_pvc --- test/integration/fn_pvc.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/integration/fn_pvc.go b/test/integration/fn_pvc.go index 785783fd80..c387a1750b 100644 --- a/test/integration/fn_pvc.go +++ b/test/integration/fn_pvc.go @@ -38,7 +38,7 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st defer cancel() if _, err := PodWait(ctx, t, profile, "kube-system", 
"integration-test=storage-provisioner", Minutes(4)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for storage-provisioner: %v", err) } checkStorageClass := func() error { @@ -58,13 +58,13 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st // Ensure the addon-manager has created the StorageClass before creating a claim, otherwise it won't be bound if err := retry.Expo(checkStorageClass, time.Millisecond*500, Seconds(100)); err != nil { - t.Errorf("no default storage class after retry: %v", err) + t.Errorf("failed to check for storage class: %v", err) } // Now create a testpvc rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "pvc.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Args, err) } checkStoragePhase := func() error { @@ -84,6 +84,6 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st } if err := retry.Expo(checkStoragePhase, 2*time.Second, Minutes(4)); err != nil { - t.Fatalf("PV Creation failed with error: %v", err) + t.Fatalf("failed to check storage phase: %v", err) } } From b63f0488546478dc07543d92065b3074996b925f Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 20:26:10 -0700 Subject: [PATCH 30/63] improve logs for fn_tunnel_cmd --- test/integration/fn_tunnel_cmd.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration/fn_tunnel_cmd.go b/test/integration/fn_tunnel_cmd.go index e4598b3da7..dffb247fb4 100644 --- a/test/integration/fn_tunnel_cmd.go +++ b/test/integration/fn_tunnel_cmd.go @@ -50,7 +50,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { client, err := kapi.Client(profile) if err != nil { - t.Fatalf("client: %v", err) + t.Fatalf("failed to get kubernetes client for %q: %v", profile, err) } // Pre-Cleanup @@ -62,7 +62,7 @@ func 
validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { args := []string{"-p", profile, "tunnel", "--alsologtostderr", "-v=1"} ss, err := Start(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", args, err) + t.Errorf("failed to start a tunnel: args %q: %v", args, err) } defer ss.Stop(t) @@ -99,7 +99,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { if err != nil { t.Errorf("%s failed: %v", rr.Args, err) } - t.Logf("kubectl get svc nginx-svc:\n%s", rr.Stdout) + t.Logf("failed to kubectl get svc nginx-svc:\n%s", rr.Stdout) } got := []byte{} @@ -120,11 +120,11 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { return nil } if err = retry.Expo(fetch, time.Millisecond*500, Minutes(2), 13); err != nil { - t.Errorf("failed to contact nginx at %s: %v", nginxIP, err) + t.Errorf("failed to hit nginx at %q: %v", nginxIP, err) } want := "Welcome to nginx!" if !strings.Contains(string(got), want) { - t.Errorf("body = %q, want *%s*", got, want) + t.Errorf("expected body to contain %q, but got *%q*", want, got) } } From 362254cfc541383d709985eaf5b6ff0ff94e4431 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 21:43:32 -0700 Subject: [PATCH 31/63] improve test log formatting for functional_test --- test/integration/functional_test.go | 148 ++++++++++++++-------------- 1 file changed, 74 insertions(+), 74 deletions(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 127ec9dd99..f9c4b3b440 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -63,11 +63,11 @@ func TestFunctional(t *testing.T) { } p := localSyncTestPath() if err := os.Remove(p); err != nil { - t.Logf("unable to remove %s: %v", p, err) + t.Logf("unable to remove %q: %v", p, err) } p = localTestCertPath() if err := os.Remove(p); err != nil { - t.Logf("unable to remove %s: %v", p, err) + t.Logf("unable to remove 
%q: %v", p, err) } CleanupWithLogs(t, profile, cancel) }() @@ -137,7 +137,7 @@ func TestFunctional(t *testing.T) { func validateNodeLabels(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "--output=go-template", "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Args, err) } expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name"} for _, el := range expectedLabels { @@ -155,10 +155,10 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { c := exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && "+Target()+" status -p "+profile) rr, err := Run(t, c) if err != nil { - t.Fatalf("Failed to do minikube status after eval-ing docker-env %s", err) + t.Fatalf("failed to do minikube status after eval-ing docker-env %s", err) } if !strings.Contains(rr.Output(), "Running") { - t.Fatalf("Expected status output to include 'Running' after eval docker-env but got \n%s", rr.Output()) + t.Fatalf("expected status output to include 'Running' after eval docker-env but got: *%q*", rr.Output()) } mctx, cancel = context.WithTimeout(ctx, Seconds(13)) @@ -167,12 +167,12 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { c = exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && docker images") rr, err = Run(t, c) if err != nil { - t.Fatalf("Failed to test eval docker-evn %s", err) + t.Fatalf("failed to run minikube docker-env. 
args %q : %v ", rr.Args, err) } expectedImgInside := "gcr.io/k8s-minikube/storage-provisioner" if !strings.Contains(rr.Output(), expectedImgInside) { - t.Fatalf("Expected 'docker ps' to have %q from docker-daemon inside minikube. the docker ps output is:\n%q\n", expectedImgInside, rr.Output()) + t.Fatalf("expected 'docker images' to have %q inside minikube. but the output is: *%q*", expectedImgInside, rr.Output()) } } @@ -180,11 +180,11 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { srv, err := startHTTPProxy(t) if err != nil { - t.Fatalf("Failed to set up the test proxy: %s", err) + t.Fatalf("failed to set up the test proxy: %s", err) } // Use more memory so that we may reliably fit MySQL and nginx - startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory", "2500MB"}, StartArgs()...) + startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...) c := exec.CommandContext(ctx, Target(), startArgs...) env := os.Environ() env = append(env, fmt.Sprintf("HTTP_PROXY=%s", srv.Addr)) @@ -192,7 +192,7 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { c.Env = env rr, err := Run(t, c) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed minikube start. args %q: %v", rr.Args, err) } want := "Found network options:" @@ -210,10 +210,10 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { func validateKubeContext(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "config", "current-context")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get current-context. 
args %q : %v", rr.Args, err) } if !strings.Contains(rr.Stdout.String(), profile) { - t.Errorf("current-context = %q, want %q", rr.Stdout.String(), profile) + t.Errorf("expected current-context = %q, but got *%q*", profile, rr.Stdout.String()) } } @@ -221,13 +221,13 @@ func validateKubeContext(ctx context.Context, t *testing.T, profile string) { func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "po", "-A")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get kubectl pods: args %q : %v", rr.Args, err) } if rr.Stderr.String() != "" { - t.Errorf("%s: got unexpected stderr: %s", rr.Command(), rr.Stderr) + t.Errorf("expected stderr to be empty but got *%q*: args %q", rr.Stderr, rr.Command()) } if !strings.Contains(rr.Stdout.String(), "kube-system") { - t.Errorf("%s = %q, want *kube-system*", rr.Command(), rr.Stdout) + t.Errorf("expected stdout to include *kube-system* but got *%q*. args: %q", rr.Stdout, rr.Command()) } } @@ -237,7 +237,7 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) kubectlArgs := []string{"-p", profile, "kubectl", "--", "--context", profile, "get", "pods"} rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to get pods. args %q: %v", rr.Args, err) } } @@ -245,12 +245,12 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) func validateComponentHealth(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to get components. 
args %q: %v", rr.Args, err) } cs := api.ComponentStatusList{} d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes())) if err := d.Decode(&cs); err != nil { - t.Fatalf("decode: %v", err) + t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Args, err) } for _, i := range cs.Items { @@ -270,28 +270,29 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string) func validateStatusCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) if err != nil { - t.Errorf("%q failed: %v", rr.Args, err) + t.Errorf("failed to run minikube status. args %q : %v", rr.Args, err) } // Custom format rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-f", "host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}")) if err != nil { - t.Errorf("%q failed: %v", rr.Args, err) + t.Errorf("failed to run minikube status with custom format: args %q: %v", rr.Args, err) } - match, _ := regexp.MatchString(`host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)`, rr.Stdout.String()) + re := `host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)` + match, _ := regexp.MatchString(re, rr.Stdout.String()) if !match { - t.Errorf("%q failed: %v. Output for custom format did not match", rr.Args, err) + t.Errorf("failed to match regex %q for minikube status with custom format. args %q. output %q", re, rr.Args, rr.Output()) } // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-o", "json")) if err != nil { - t.Errorf("%q failed: %v", rr.Args, err) + t.Errorf("failed to run minikube status with json output. args %q : %v", rr.Args, err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("%q failed: %v", rr.Args, err) + t.Errorf("failed to decode json from minikube status. args %q. 
%v", rr.Args, err) } if _, ok := jsonObject["Host"]; !ok { t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Host") @@ -312,7 +313,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { args := []string{"dashboard", "--url", "-p", profile, "--alsologtostderr", "-v=1"} ss, err := Start(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%q failed: %v", args, err) + t.Errorf("failed to run minikube dashboard. args %q : %v", args, err) } defer func() { ss.Stop(t) @@ -339,7 +340,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { if resp.StatusCode != http.StatusOK { body, err := ioutil.ReadAll(resp.Body) if err != nil { - t.Errorf("Unable to read http response body: %v", err) + t.Errorf("failed to read http response body from dashboard %q: %v", u.String(), err) } t.Errorf("%s returned status code %d, expected %d.\nbody:\n%s", u, resp.StatusCode, http.StatusOK, body) } @@ -349,7 +350,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { func validateDNS(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml"))) if err != nil { - t.Fatalf("%q failed: %v", rr.Args, err) + t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Args, err) } names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)) @@ -407,29 +408,29 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { t.Run("cache", func(t *testing.T) { t.Run("add", func(t *testing.T) { for _, img := range []string{"busybox:latest", "busybox:1.28.4-glibc", "k8s.gcr.io/pause:latest"} { - _, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img)) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img)) if err != nil { - 
t.Errorf("Failed to cache image %q", img) + t.Errorf("failed to cache add image %q. args %q err %v", img, rr.Args, err) } } }) t.Run("delete_busybox:1.28.4-glibc", func(t *testing.T) { - _, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc")) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc")) if err != nil { - t.Errorf("failed to delete image busybox:1.28.4-glibc from cache: %v", err) + t.Errorf("failed to delete image busybox:1.28.4-glibc from cache. args %q: %v", rr.Args, err) } }) t.Run("list", func(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "list")) if err != nil { - t.Errorf("cache list failed: %v", err) + t.Errorf("failed to do cache list. args %q: %v", rr.Args, err) } if !strings.Contains(rr.Output(), "k8s.gcr.io/pause") { - t.Errorf("cache list did not include k8s.gcr.io/pause") + t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got:\n ***%q***", rr.Output()) } if strings.Contains(rr.Output(), "busybox:1.28.4-glibc") { - t.Errorf("cache list should not include busybox:1.28.4-glibc") + t.Errorf("expected 'cache list' output not to include busybox:1.28.4-glibc but got:\n ***%q***", rr.Output()) } }) @@ -439,7 +440,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { t.Errorf("failed to get images by %q ssh %v", rr.Command(), err) } if !strings.Contains(rr.Output(), "1.28.4-glibc") { - t.Errorf("expected '1.28.4-glibc' to be in the output: %s", rr.Output()) + t.Errorf("expected '1.28.4-glibc' to be in the output but got %q", rr.Output()) } }) @@ -490,16 +491,16 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) { args := append([]string{"-p", profile, "config"}, tc.args...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil && tc.wantErr == "" { - t.Errorf("unexpected failure: %s failed: %v", rr.Args, err) + t.Errorf("failed to config minikube. args %q : %v", rr.Args, err) } got := strings.TrimSpace(rr.Stdout.String()) if got != tc.wantOut { - t.Errorf("%s stdout got: %q, want: %q", rr.Command(), got, tc.wantOut) + t.Errorf("expected config output for %q to be -%q- but got *%q*", rr.Command(), tc.wantOut, got) } got = strings.TrimSpace(rr.Stderr.String()) if got != tc.wantErr { - t.Errorf("%s stderr got: %q, want: %q", rr.Command(), got, tc.wantErr) + t.Errorf("expected config error for %q to be -%q- but got *%q*", rr.Command(), tc.wantErr, got) } } } @@ -512,7 +513,7 @@ func validateLogsCmd(ctx context.Context, t *testing.T, profile string) { } for _, word := range []string{"Docker", "apiserver", "Linux", "kubelet"} { if !strings.Contains(rr.Stdout.String(), word) { - t.Errorf("minikube logs missing expected word: %q", word) + t.Errorf("excpeted minikube logs to include word: -%q- but got \n***%q***\n", word, rr.Output()) } } } @@ -549,7 +550,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { // List profiles rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to list profiles: args %q : %v", rr.Args, err) } // Table output @@ -563,21 +564,20 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { } } if !profileExists { - t.Errorf("%s failed: Missing profile '%s'. Got '\n%s\n'", rr.Args, profile, rr.Stdout.String()) + t.Errorf("expected 'profile list' output to include %q but got *%q*. 
args: %q", profile, rr.Stdout.String(), rr.Args) } - }) t.Run("profile_json_output", func(t *testing.T) { // Json output rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to list profiles with json format. args %q: %v", rr.Args, err) } var jsonObject map[string][]map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to decode json from profile list: args %q: %v", rr.Args, err) } validProfiles := jsonObject["valid"] profileExists := false @@ -588,7 +588,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { } } if !profileExists { - t.Errorf("%s failed: Missing profile '%s'. Got '\n%s\n'", rr.Args, profile, rr.Stdout.String()) + t.Errorf("expected the json of 'profile list' to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Args) } }) @@ -598,7 +598,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node")) if err != nil { - t.Logf("%s failed: %v (may not be an error)", rr.Args, err) + t.Logf("% failed: %v (may not be an error)", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "expose", "deployment", "hello-node", "--type=NodePort", "--port=8080")) if err != nil { @@ -606,48 +606,48 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { } if _, err := PodWait(ctx, t, profile, "default", "app=hello-node", Minutes(10)); err != nil { - t.Fatalf("wait: %v", err) + t.Fatalf("failed waiting for hello-node pod: %v", err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), 
"-p", profile, "service", "list")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to do service list. args %q : %v", rr.Args, err) } if !strings.Contains(rr.Stdout.String(), "hello-node") { - t.Errorf("service list got %q, wanted *hello-node*", rr.Stdout.String()) + t.Errorf("expected 'service list' to contain *hello-node* but got -%q-", rr.Stdout.String()) } // Test --https --url mode rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "--namespace=default", "--https", "--url", "hello-node")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to get service url. args %q : %v", rr.Args, err) } if rr.Stderr.String() != "" { - t.Errorf("unexpected stderr output: %s", rr.Stderr) + t.Errorf("expected stderr to be empty but got *%q*", rr.Stderr) } endpoint := strings.TrimSpace(rr.Stdout.String()) u, err := url.Parse(endpoint) if err != nil { - t.Fatalf("failed to parse %q: %v", endpoint, err) + t.Fatalf("failed to parse service url endpoint %q: %v", endpoint, err) } if u.Scheme != "https" { - t.Errorf("got scheme: %q, expected: %q", u.Scheme, "https") + t.Errorf("expected scheme to be 'https' but got %q", u.Scheme) } // Test --format=IP rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url", "--format={{.IP}}")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get service url with custom format. args %q: %v", rr.Args, err) } if strings.TrimSpace(rr.Stdout.String()) != u.Hostname() { - t.Errorf("%s = %q, wanted %q", rr.Args, rr.Stdout.String(), u.Hostname()) + t.Errorf("expected 'service --format={{.IP}}' output to be -%q- but got *%q* . 
args %q.", u.Hostname(), rr.Stdout.String(), rr.Args) } // Test a regular URLminikube rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to get service url. args: %q: %v", rr.Args, err) } endpoint = strings.TrimSpace(rr.Stdout.String()) @@ -656,7 +656,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("failed to parse %q: %v", endpoint, err) } if u.Scheme != "http" { - t.Fatalf("got scheme: %q, expected: %q", u.Scheme, "http") + t.Fatalf("expected scheme to be -'http'- got scheme: *%q*", "http", u.Scheme) } t.Logf("url: %s", endpoint) @@ -665,7 +665,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("get failed: %v\nresp: %v", err, resp) } if resp.StatusCode != http.StatusOK { - t.Fatalf("%s = status code %d, want %d", u, resp.StatusCode, http.StatusOK) + t.Fatalf("expeced status code for %q to be -%q- but got *%q*", endpoint, http.StatusOK, resp.StatusCode) } } @@ -674,23 +674,23 @@ func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) { // Table output rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to do addon list: args %q : %v", rr.Args, err) } for _, a := range []string{"dashboard", "ingress", "ingress-dns"} { if !strings.Contains(rr.Output(), a) { - t.Errorf("addon list expected to include %q but didn't output: %q", a, rr.Output()) + t.Errorf("expected 'addon list' output to include -%q- but got *%q*", a, rr.Output()) } } // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list", "-o", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to do addon list with json output. 
args %q: %v", rr.Args, err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to decode addon list json output : %v", err) } } @@ -702,10 +702,10 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { want := "hello\n" rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("echo hello"))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to run an ssh command. args %q : %v", rr.Args, err) } if rr.Stdout.String() != want { - t.Errorf("%v = %q, want = %q", rr.Args, rr.Stdout.String(), want) + t.Errorf("expected minikube ssh command output to be -%q- but got *%q*. args %q", want, rr.Stdout.String(), rr.Args) } } @@ -713,12 +713,12 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { func validateMySQL(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "mysql.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to kubectl replace mysql: args %q failed: %v", rr.Args, err) } names, err := PodWait(ctx, t, profile, "default", "app=mysql", Minutes(10)) if err != nil { - t.Fatalf("podwait: %v", err) + t.Fatalf("failed waiting for mysql pod: %v", err) } // Retry, as mysqld first comes up without users configured. Scan for names in case of a reschedule. 
@@ -726,8 +726,8 @@ func validateMySQL(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "mysql", "-ppassword", "-e", "show databases;")) return err } - if err = retry.Expo(mysql, 2*time.Second, Seconds(180)); err != nil { - t.Errorf("mysql failing: %v", err) + if err = retry.Expo(mysql, 1*time.Second, Seconds(200)); err != nil { + t.Errorf("failed to exec 'mysql -ppassword -e show databases;': %v", err) } } @@ -757,12 +757,12 @@ func setupFileSync(ctx context.Context, t *testing.T, profile string) { t.Logf("local sync path: %s", p) err := copy.Copy("./testdata/sync.test", p) if err != nil { - t.Fatalf("copy: %v", err) + t.Fatalf("failed to copy ./testdata/sync.test : %v", err) } err = copy.Copy("./testdata/minikube_test.pem", localTestCertPath()) if err != nil { - t.Fatalf("copy: %v", err) + t.Fatalf("failed to copy ./testdata/minikube_test.pem : %v", err) } } @@ -783,7 +783,7 @@ func validateFileSync(ctx context.Context, t *testing.T, profile string) { expected, err := ioutil.ReadFile("./testdata/sync.test") if err != nil { - t.Errorf("test file not found: %v", err) + t.Errorf("failed to read test file '/testdata/sync.test' : %v", err) } if diff := cmp.Diff(string(expected), got); diff != "" { @@ -813,13 +813,13 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { t.Logf("Checking for existence of %s within VM", vp) rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to check existance of %q inside minikube. args %q: %v", vp, rr.Args, err) } // Strip carriage returned by ssh got := strings.Replace(rr.Stdout.String(), "\r", "", -1) if diff := cmp.Diff(string(want), got); diff != "" { - t.Errorf("minikube_test.pem -> %s mismatch (-want +got):\n%s", vp, diff) + t.Errorf("failed verify pem file. 
minikube_test.pem -> %s mismatch (-want +got):\n%s", vp, diff) } } } @@ -828,7 +828,7 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { func validateUpdateContextCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "update-context", "--alsologtostderr", "-v=2")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to run minikube update-context: args %q: %v", rr.Args, err) } want := []byte("IP was already correctly configured") From 90cac63b8517b9eca2470d995486236374a37d98 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 21:48:36 -0700 Subject: [PATCH 32/63] improve formatting for TestGuestEnvironment --- test/integration/guest_env_test.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/test/integration/guest_env_test.go b/test/integration/guest_env_test.go index 41344ccbc8..e59284df60 100644 --- a/test/integration/guest_env_test.go +++ b/test/integration/guest_env_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/minikube/pkg/minikube/vmpath" ) +// TestGuestEnvironment verifies files and packges installed inside minikube ISO/Base image func TestGuestEnvironment(t *testing.T) { MaybeParallel(t) @@ -37,18 +38,18 @@ func TestGuestEnvironment(t *testing.T) { args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=1800", "--wait=false"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to start minikube: args %q: %v", rr.Args, err) } // Run as a group so that our defer doesn't happen as tests are runnings t.Run("Binaries", func(t *testing.T) { - for _, pkg := range []string{"git", "rsync", "curl", "wget", "socat", "iptables", "VBoxControl", "VBoxService"} { + for _, pkg := range []string{"git", "rsync", "curl", "wget", "socat", "iptables", "VBoxControl", "VBoxService", "crictl", "podman", "docker"} { pkg := pkg t.Run(pkg, func(t *testing.T) { t.Parallel() rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("which %s", pkg))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to verify existance of %q binary : args %q: %v", pkg, rr.Args, err) } }) } @@ -67,9 +68,9 @@ func TestGuestEnvironment(t *testing.T) { mount := mount t.Run(mount, func(t *testing.T) { t.Parallel() - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount))) + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to verify existance of %q mount. 
args %q: %v", mount, rr.Args, err) } }) } From 73a9653c80a5ec2b5efbcf9c9b72245c3dcc1c91 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 21:52:00 -0700 Subject: [PATCH 33/63] improve logging for gvisor test --- test/integration/gvisor_addon_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/test/integration/gvisor_addon_test.go b/test/integration/gvisor_addon_test.go index d69f1d205b..6e20c249c6 100644 --- a/test/integration/gvisor_addon_test.go +++ b/test/integration/gvisor_addon_test.go @@ -50,7 +50,7 @@ func TestGvisorAddon(t *testing.T) { startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--container-runtime=containerd", "--docker-opt", "containerd=/var/run/containerd/containerd.sock"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed to start minikube: args %q: %v", rr.Args, err) } // If it exists, include a locally built gvisor image @@ -65,7 +65,7 @@ func TestGvisorAddon(t *testing.T) { } if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil { - t.Fatalf("waiting for gvisor controller to be up: %v", err) + t.Fatalf("failed waiting for 'gvisor controller' pod: %v", err) } // Create an untrusted workload @@ -80,29 +80,29 @@ func TestGvisorAddon(t *testing.T) { } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waiting for nginx pod: %v", err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waiting for gvisor pod: %v", err) } // Ensure that workloads survive a restart rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + 
t.Fatalf("faild stopping minikube. args %q : %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed starting minikube after a stop. args %q, %v", rr.Args, err) } if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil { - t.Errorf("waiting for gvisor controller to be up: %v", err) + t.Errorf("failed waiting for 'gvisor controller' pod : %v", err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waiting for 'nginx' pod : %v", err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil { - t.Errorf("nginx: %v", err) + t.Errorf("failed waiting for 'gvisor' pod : %v", err) } } From acc951033b9a3a8a86b65659ee6c54ec578fe6d0 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 22:10:32 -0700 Subject: [PATCH 34/63] improve test logs for start_stop_delete --- test/integration/start_stop_delete_test.go | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index e948f4b6ef..8c6ab2232f 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -92,7 +92,7 @@ func TestStartStop(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("failed starting minikube -first start-. 
args %q: %v", rr.Args, err) } if !strings.Contains(tc.name, "cni") { @@ -101,43 +101,43 @@ func TestStartStop(t *testing.T) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed stopping minikube - first stop-. args %q : %v", rr.Args, err) } // The none driver never really stops if !NoneDriver() { got := Status(ctx, t, Target(), profile, "Host") if got != state.Stopped.String() { - t.Errorf("post-stop host status = %q; want = %q", got, state.Stopped) + t.Errorf("expected post-stop host status to be -%q- but got *%q*", state.Stopped, got) } } // Enable an addon to assert it comes up afterwards rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to enable an addon while minikube is stopped. args %q: ", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { // Explicit fatal so that failures don't move directly to deletion - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("Failed to start minikube after stop -Second Start-. 
args %q: %v", rr.Args, err) } if strings.Contains(tc.name, "cni") { t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(") } else { if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)); err != nil { - t.Fatalf("post-stop-start pod wait: %v", err) + t.Fatalf("failed waiting for pod 'busybox' post-stop-start: %v", err) } if _, err := PodWait(ctx, t, profile, "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", Minutes(4)); err != nil { - t.Fatalf("post-stop-start addon wait: %v", err) + t.Fatalf("failed waiting for 'addon dashboard' pod post-stop-start: %v", err) } } got := Status(ctx, t, Target(), profile, "Host") if got != state.Running.String() { - t.Errorf("post-start host status = %q; want = %q", got, state.Running) + t.Errorf("expected host status after start-stop-start to be -%q- but got *%q*", state.Running, got) } if !NoneDriver() { @@ -150,7 +150,7 @@ func TestStartStop(t *testing.T) { // Normally handled by cleanuprofile, but not fatal there rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed to clean up: args %q: %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "config", "get-contexts", profile)) @@ -158,7 +158,7 @@ func TestStartStop(t *testing.T) { t.Logf("config context error: %v (may be ok)", err) } if rr.ExitCode != 1 { - t.Errorf("wanted exit code 1, got %d. output: %s", rr.ExitCode, rr.Output()) + t.Errorf("expected exit code 1, got %d. output: %s", rr.ExitCode, rr.Output()) } } }) @@ -250,14 +250,14 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "sudo crictl images -o json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("failed tp get images inside minikube. 
args %q: %v", rr.Args, err) } jv := map[string][]struct { Tags []string `json:"repoTags"` }{} err = json.Unmarshal(rr.Stdout.Bytes(), &jv) if err != nil { - t.Errorf("images unmarshal: %v", err) + t.Errorf("failed to decode images json %v. output: %q", err, rr.Output()) } found := map[string]bool{} for _, img := range jv["images"] { @@ -274,7 +274,7 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version } want, err := images.Kubeadm("", version) if err != nil { - t.Errorf("kubeadm images: %v", version) + t.Errorf("failed to get kubeadm images for %s : %v", version, err) } gotImages := []string{} for k := range found { From dc5dd62b58f029609e6cfa38c503d21f379bf5b0 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 22:15:24 -0700 Subject: [PATCH 35/63] lint --- test/integration/aaa_download_only_test.go | 6 +++--- test/integration/addons_test.go | 2 +- test/integration/functional_test.go | 10 +++++----- test/integration/start_stop_delete_test.go | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index cf6239cd9c..2c75e0d47e 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -156,9 +156,9 @@ func TestDownloadOnlyKic(t *testing.T) { args := []string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr"} args = append(args, StartArgs()...) 
- rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) - if err != nil { - t.Errorf("start with download only failed %q : %v:\n%s", args, err) + + if _, err := Run(t, exec.CommandContext(ctx, Target(), args...)); err != nil { + t.Errorf("start with download only failed %q : %v", args, err) } // Make sure the downloaded image tarball exists diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 43ad112651..3fa3a63d4c 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -179,7 +179,7 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { t.Fatalf("failed run minikube ip. args %q : %v", rr.Args, err) } if rr.Stderr.String() != "" { - t.Errorf("expected stderr to be -empty- but got: *%q*", rr.Args, rr.Stderr) + t.Errorf("expected stderr to be -empty- but got: *%q* . args %q", rr.Stderr, rr.Args) } endpoint := fmt.Sprintf("http://%s:%d", strings.TrimSpace(rr.Stdout.String()), 5000) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index f9c4b3b440..aa9ac58eb0 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -598,11 +598,11 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node")) if err != nil { - t.Logf("% failed: %v (may not be an error)", rr.Args, err) + t.Logf("%q failed: %v (may not be an error).", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "expose", "deployment", "hello-node", "--type=NodePort", "--port=8080")) if err != nil { - t.Logf("%s failed: %v (may not be an error)", rr.Args, err) + t.Logf("%q failed: %v (may not be an error)", rr.Args, err) } if _, err := 
PodWait(ctx, t, profile, "default", "app=hello-node", Minutes(10)); err != nil { @@ -656,7 +656,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("failed to parse %q: %v", endpoint, err) } if u.Scheme != "http" { - t.Fatalf("expected scheme to be -'http'- got scheme: *%q*", "http", u.Scheme) + t.Fatalf("expected scheme to be -%q- got scheme: *%q*", "http", u.Scheme) } t.Logf("url: %s", endpoint) @@ -665,7 +665,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { t.Fatalf("get failed: %v\nresp: %v", err, resp) } if resp.StatusCode != http.StatusOK { - t.Fatalf("expeced status code for %q to be -%q- but got *%q*", endpoint, http.StatusOK, resp.StatusCode) + t.Fatalf("expected status code for %q to be -%q- but got *%q*", endpoint, http.StatusOK, resp.StatusCode) } } @@ -813,7 +813,7 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { t.Logf("Checking for existence of %s within VM", vp) rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp))) if err != nil { - t.Errorf("failed to check existance of %q inside minikube. args %q: %v", vp, rr.Args, err) + t.Errorf("failed to check existence of %q inside minikube. args %q: %v", vp, rr.Args, err) } // Strip carriage returned by ssh diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index 8c6ab2232f..21ca1d57ed 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -115,13 +115,13 @@ func TestStartStop(t *testing.T) { // Enable an addon to assert it comes up afterwards rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("failed to enable an addon while minikube is stopped. args %q: ", rr.Args, err) + t.Errorf("failed to enable an addon post-stop. 
args %q: %v", rr.Args, err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { // Explicit fatal so that failures don't move directly to deletion - t.Fatalf("Failed to start minikube after stop -Second Start-. args %q: %v", rr.Args, err) + t.Fatalf("failed to start minikube post-stop. args %q: %v", rr.Args, err) } if strings.Contains(tc.name, "cni") { From 2e64eb795227911aee5535f81534245be34807ce Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 22:21:19 -0700 Subject: [PATCH 36/63] convert all rr.Args to rr.Command() --- test/integration/aaa_download_only_test.go | 4 +- test/integration/aab_offline_test.go | 2 +- test/integration/addons_test.go | 40 +++++----- test/integration/docker_test.go | 6 +- test/integration/fn_mount_cmd.go | 10 +-- test/integration/fn_pvc.go | 2 +- test/integration/fn_tunnel_cmd.go | 4 +- test/integration/functional_test.go | 92 +++++++++++----------- test/integration/guest_env_test.go | 6 +- test/integration/gvisor_addon_test.go | 14 ++-- test/integration/none_test.go | 8 +- test/integration/start_stop_delete_test.go | 26 +++--- test/integration/version_upgrade_test.go | 8 +- 13 files changed, 111 insertions(+), 111 deletions(-) diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 2c75e0d47e..27442b31cf 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -129,7 +129,7 @@ func TestDownloadOnly(t *testing.T) { } rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all")) if err != nil { - t.Errorf("failed to delete all. args: %q : %v", rr.Args, err) + t.Errorf("failed to delete all. args: %q : %v", rr.Command(), err) } }) // Delete should always succeed, even if previously partially or fully deleted. 
@@ -139,7 +139,7 @@ func TestDownloadOnly(t *testing.T) { } rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) if err != nil { - t.Errorf("failed to delete. args: %q: %v", rr.Args, err) + t.Errorf("failed to delete. args: %q: %v", rr.Command(), err) } }) }) diff --git a/test/integration/aab_offline_test.go b/test/integration/aab_offline_test.go index fb1cdb710f..c635ef09ef 100644 --- a/test/integration/aab_offline_test.go +++ b/test/integration/aab_offline_test.go @@ -53,7 +53,7 @@ func TestOffline(t *testing.T) { rr, err := Run(t, c) if err != nil { // Fatal so that we may collect logs before stop/delete steps - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } }) } diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index 3fa3a63d4c..e90249a49e 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -43,7 +43,7 @@ func TestAddons(t *testing.T) { args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "-v=1", "--addons=ingress", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // Parallelized tests @@ -69,15 +69,15 @@ func TestAddons(t *testing.T) { // Assert that disable/enable works offline rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile)) if err != nil { - t.Errorf("failed to stop minikube. args %q : %v", rr.Args, err) + t.Errorf("failed to stop minikube. 
args %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("failed to enable dashboard addon: args %q : %v", rr.Args, err) + t.Errorf("failed to enable dashboard addon: args %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "disable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("failed to disable dashboard addon: args %q : %v", rr.Args, err) + t.Errorf("failed to disable dashboard addon: args %q : %v", rr.Command(), err) } } @@ -100,11 +100,11 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-ing.yaml"))) if err != nil { - t.Errorf("failed to kubectl replace nginx-ing. args %q. %v", rr.Args, err) + t.Errorf("failed to kubectl replace nginx-ing. args %q. %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-pod-svc.yaml"))) if err != nil { - t.Errorf("failed to kubectl replace nginx-pod-svc. args %q. %v", rr.Args, err) + t.Errorf("failed to kubectl replace nginx-pod-svc. args %q. 
%v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx", Minutes(4)); err != nil { @@ -121,10 +121,10 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { return err } if rr.Stderr.String() != "" { - t.Logf("%v: unexpected stderr: %s (may be temproary)", rr.Args, rr.Stderr) + t.Logf("%v: unexpected stderr: %s (may be temproary)", rr.Command(), rr.Stderr) } if !strings.Contains(rr.Stdout.String(), want) { - return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want) + return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want) } return nil } @@ -135,7 +135,7 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "ingress", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("failed to disable ingress addon. args %q : %v", rr.Args, err) + t.Errorf("failed to disable ingress addon. args %q : %v", rr.Command(), err) } } @@ -161,12 +161,12 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { // Test from inside the cluster (no curl available on busybox) rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "po", "-l", "run=registry-test", "--now")) if err != nil { - t.Logf("pre-cleanup %s failed: %v (not a problem)", rr.Args, err) + t.Logf("pre-cleanup %s failed: %v (not a problem)", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "registry-test", "--restart=Never", "--image=busybox", "-it", "--", "sh", "-c", "wget --spider -S http://registry.kube-system.svc.cluster.local")) if err != nil { - t.Errorf("failed to hit registry.kube-system.svc.cluster.local. args %q failed: %v", rr.Args, err) + t.Errorf("failed to hit registry.kube-system.svc.cluster.local. 
args %q failed: %v", rr.Command(), err) } want := "HTTP/1.1 200" if !strings.Contains(rr.Stdout.String(), want) { @@ -176,10 +176,10 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { // Test from outside the cluster rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ip")) if err != nil { - t.Fatalf("failed run minikube ip. args %q : %v", rr.Args, err) + t.Fatalf("failed run minikube ip. args %q : %v", rr.Command(), err) } if rr.Stderr.String() != "" { - t.Errorf("expected stderr to be -empty- but got: *%q* . args %q", rr.Stderr, rr.Args) + t.Errorf("expected stderr to be -empty- but got: *%q* . args %q", rr.Stderr, rr.Command()) } endpoint := fmt.Sprintf("http://%s:%d", strings.TrimSpace(rr.Stdout.String()), 5000) @@ -205,7 +205,7 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "registry", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("failed to disable registry addon. args %q: %v", rr.Args, err) + t.Errorf("failed to disable registry addon. 
args %q: %v", rr.Command(), err) } } @@ -232,10 +232,10 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin return err } if rr.Stderr.String() != "" { - t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr) + t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr) } if !strings.Contains(rr.Stdout.String(), want) { - return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want) + return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want) } return nil } @@ -247,7 +247,7 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "metrics-server", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("failed to disable metrics-server addon: args %q: %v", rr.Args, err) + t.Errorf("failed to disable metrics-server addon: args %q: %v", rr.Command(), err) } } @@ -283,10 +283,10 @@ func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) return err } if rr.Stderr.String() != "" { - t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr) + t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr) } if !strings.Contains(rr.Stdout.String(), want) { - return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want) + return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want) } return nil } @@ -297,6 +297,6 @@ func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "helm-tiller", "--alsologtostderr", "-v=1")) if err != nil { - t.Errorf("failed disabling helm-tiller addon. arg %q.s %v", rr.Args, err) + t.Errorf("failed disabling helm-tiller addon. 
arg %q.s %v", rr.Command(), err) } } diff --git a/test/integration/docker_test.go b/test/integration/docker_test.go index da26300dd5..dd0a27de06 100644 --- a/test/integration/docker_test.go +++ b/test/integration/docker_test.go @@ -39,12 +39,12 @@ func TestDockerFlags(t *testing.T) { args := append([]string{"start", "-p", profile, "--cache-images=false", "--memory=1800", "--install-addons=false", "--wait=false", "--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", "--docker-opt=icc=true", "--alsologtostderr", "-v=5"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("failed to start minikube with args: %q : %v", rr.Args, err) + t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=Environment --no-pager")) if err != nil { - t.Errorf("failed to 'systemctl show docker' inside minikube. args %q: %v", rr.Args, err) + t.Errorf("failed to 'systemctl show docker' inside minikube. args %q: %v", rr.Command(), err) } for _, envVar := range []string{"FOO=BAR", "BAZ=BAT"} { @@ -55,7 +55,7 @@ func TestDockerFlags(t *testing.T) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=ExecStart --no-pager")) if err != nil { - t.Errorf("failed on the second 'systemctl show docker' inside minikube. args %q: %v", rr.Args, err) + t.Errorf("failed on the second 'systemctl show docker' inside minikube. 
args %q: %v", rr.Command(), err) } for _, opt := range []string{"--debug", "--icc=true"} { if !strings.Contains(rr.Stdout.String(), opt) { diff --git a/test/integration/fn_mount_cmd.go b/test/integration/fn_mount_cmd.go index 6e292af109..915262a833 100644 --- a/test/integration/fn_mount_cmd.go +++ b/test/integration/fn_mount_cmd.go @@ -117,7 +117,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Assert that we can access the mount without an error. Display for debugging. rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "--", "ls", "-la", guestMount)) if err != nil { - t.Fatalf("failed verifying accessing to the mount. args %q : %v", rr.Args, err) + t.Fatalf("failed verifying accessing to the mount. args %q : %v", rr.Command(), err) } t.Logf("guest mount directory contents\n%s", rr.Stdout) @@ -125,7 +125,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { tp := filepath.Join("/mount-9p", testMarker) rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "cat", tp)) if err != nil { - t.Fatalf("failed to verify the mount contains unique test marked: args %q: %v", rr.Args, err) + t.Fatalf("failed to verify the mount contains unique test marked: args %q: %v", rr.Command(), err) } if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) { @@ -136,7 +136,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // Start the "busybox-mount" pod. rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox-mount-test.yaml"))) if err != nil { - t.Fatalf("failed to 'kubectl replace' for busybox-mount-test. args %q : %v", rr.Args, err) + t.Fatalf("failed to 'kubectl replace' for busybox-mount-test. 
args %q : %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox-mount", Minutes(4)); err != nil { @@ -157,7 +157,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // test that file written from host was read in by the pod via cat /mount-9p/written-by-host; rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "logs", "busybox-mount")) if err != nil { - t.Errorf("failed to get kubectl logs for busybox-mount. args %q : %v", rr.Args, err) + t.Errorf("failed to get kubectl logs for busybox-mount. args %q : %v", rr.Command(), err) } if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) { t.Errorf("busybox-mount logs = %q, want %q", rr.Stdout.Bytes(), wantFromTest) @@ -169,7 +169,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) { // test that file written from host was read in by the pod via cat /mount-9p/fromhost; rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "stat", gp)) if err != nil { - t.Errorf("failed to stat the file %q iniside minikube : args %q: %v", gp, rr.Args, err) + t.Errorf("failed to stat the file %q iniside minikube : args %q: %v", gp, rr.Command(), err) } if runtime.GOOS == "windows" { diff --git a/test/integration/fn_pvc.go b/test/integration/fn_pvc.go index c387a1750b..9cca92cc47 100644 --- a/test/integration/fn_pvc.go +++ b/test/integration/fn_pvc.go @@ -64,7 +64,7 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st // Now create a testpvc rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "pvc.yaml"))) if err != nil { - t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Args, err) + t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Command(), err) } checkStoragePhase := func() error { diff --git a/test/integration/fn_tunnel_cmd.go b/test/integration/fn_tunnel_cmd.go index 
dffb247fb4..8f43a9a5fd 100644 --- a/test/integration/fn_tunnel_cmd.go +++ b/test/integration/fn_tunnel_cmd.go @@ -69,7 +69,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { // Start the "nginx" pod. rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "testsvc.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx-svc", Minutes(4)); err != nil { t.Fatalf("wait: %v", err) @@ -97,7 +97,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "svc", "nginx-svc")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } t.Logf("failed to kubectl get svc nginx-svc:\n%s", rr.Stdout) } diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index aa9ac58eb0..a5ee68aa73 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -137,7 +137,7 @@ func TestFunctional(t *testing.T) { func validateNodeLabels(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "--output=go-template", "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'")) if err != nil { - t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Args, err) + t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err) } expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name"} for _, el := range expectedLabels { @@ -167,7 +167,7 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) { c = exec.CommandContext(mctx, "/bin/bash", "-c", "eval 
$("+Target()+" -p "+profile+" docker-env) && docker images") rr, err = Run(t, c) if err != nil { - t.Fatalf("failed to run minikube docker-env. args %q : %v ", rr.Args, err) + t.Fatalf("failed to run minikube docker-env. args %q : %v ", rr.Command(), err) } expectedImgInside := "gcr.io/k8s-minikube/storage-provisioner" @@ -192,7 +192,7 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { c.Env = env rr, err := Run(t, c) if err != nil { - t.Errorf("failed minikube start. args %q: %v", rr.Args, err) + t.Errorf("failed minikube start. args %q: %v", rr.Command(), err) } want := "Found network options:" @@ -210,7 +210,7 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) { func validateKubeContext(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "config", "current-context")) if err != nil { - t.Errorf("failed to get current-context. args %q : %v", rr.Args, err) + t.Errorf("failed to get current-context. 
args %q : %v", rr.Command(), err) } if !strings.Contains(rr.Stdout.String(), profile) { t.Errorf("expected current-context = %q, but got *%q*", profile, rr.Stdout.String()) @@ -221,7 +221,7 @@ func validateKubeContext(ctx context.Context, t *testing.T, profile string) { func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "po", "-A")) if err != nil { - t.Errorf("failed to get kubectl pods: args %q : %v", rr.Args, err) + t.Errorf("failed to get kubectl pods: args %q : %v", rr.Command(), err) } if rr.Stderr.String() != "" { t.Errorf("expected stderr to be empty but got *%q*: args %q", rr.Stderr, rr.Command()) @@ -237,7 +237,7 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) kubectlArgs := []string{"-p", profile, "kubectl", "--", "--context", profile, "get", "pods"} rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...)) if err != nil { - t.Fatalf("failed to get pods. args %q: %v", rr.Args, err) + t.Fatalf("failed to get pods. args %q: %v", rr.Command(), err) } } @@ -245,12 +245,12 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) func validateComponentHealth(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json")) if err != nil { - t.Fatalf("failed to get components. args %q: %v", rr.Args, err) + t.Fatalf("failed to get components. 
args %q: %v", rr.Command(), err) } cs := api.ComponentStatusList{} d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes())) if err := d.Decode(&cs); err != nil { - t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Args, err) + t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Command(), err) } for _, i := range cs.Items { @@ -270,41 +270,41 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string) func validateStatusCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) if err != nil { - t.Errorf("failed to run minikube status. args %q : %v", rr.Args, err) + t.Errorf("failed to run minikube status. args %q : %v", rr.Command(), err) } // Custom format rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-f", "host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}")) if err != nil { - t.Errorf("failed to run minikube status with custom format: args %q: %v", rr.Args, err) + t.Errorf("failed to run minikube status with custom format: args %q: %v", rr.Command(), err) } re := `host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)` match, _ := regexp.MatchString(re, rr.Stdout.String()) if !match { - t.Errorf("failed to match regex %q for minikube status with custom format. args %q. output %q", re, rr.Args, rr.Output()) + t.Errorf("failed to match regex %q for minikube status with custom format. args %q. output %q", re, rr.Command(), rr.Output()) } // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-o", "json")) if err != nil { - t.Errorf("failed to run minikube status with json output. args %q : %v", rr.Args, err) + t.Errorf("failed to run minikube status with json output. 
args %q : %v", rr.Command(), err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("failed to decode json from minikube status. args %q. %v", rr.Args, err) + t.Errorf("failed to decode json from minikube status. args %q. %v", rr.Command(), err) } if _, ok := jsonObject["Host"]; !ok { - t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Host") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Host") } if _, ok := jsonObject["Kubelet"]; !ok { - t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Kubelet") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Kubelet") } if _, ok := jsonObject["APIServer"]; !ok { - t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "APIServer") + t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "APIServer") } if _, ok := jsonObject["Kubeconfig"]; !ok { - t.Errorf("%q failed: %v. Missing key %s in json object", rr.Args, err, "Kubeconfig") + t.Errorf("%q failed: %v. 
Missing key %s in json object", rr.Command(), err, "Kubeconfig") } } @@ -350,7 +350,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) { func validateDNS(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml"))) if err != nil { - t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Args, err) + t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Command(), err) } names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)) @@ -410,21 +410,21 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { for _, img := range []string{"busybox:latest", "busybox:1.28.4-glibc", "k8s.gcr.io/pause:latest"} { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img)) if err != nil { - t.Errorf("failed to cache add image %q. args %q err %v", img, rr.Args, err) + t.Errorf("failed to cache add image %q. args %q err %v", img, rr.Command(), err) } } }) t.Run("delete_busybox:1.28.4-glibc", func(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc")) if err != nil { - t.Errorf("failed to delete image busybox:1.28.4-glibc from cache. args %q: %v", rr.Args, err) + t.Errorf("failed to delete image busybox:1.28.4-glibc from cache. args %q: %v", rr.Command(), err) } }) t.Run("list", func(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "list")) if err != nil { - t.Errorf("failed to do cache list. args %q: %v", rr.Args, err) + t.Errorf("failed to do cache list. 
args %q: %v", rr.Command(), err) } if !strings.Contains(rr.Output(), "k8s.gcr.io/pause") { t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got:\n ***%q***", rr.Output()) @@ -491,7 +491,7 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) { args := append([]string{"-p", profile, "config"}, tc.args...) rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil && tc.wantErr == "" { - t.Errorf("failed to config minikube. args %q : %v", rr.Args, err) + t.Errorf("failed to config minikube. args %q : %v", rr.Command(), err) } got := strings.TrimSpace(rr.Stdout.String()) @@ -509,7 +509,7 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) { func validateLogsCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "logs")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } for _, word := range []string{"Docker", "apiserver", "Linux", "kubelet"} { if !strings.Contains(rr.Stdout.String(), word) { @@ -525,16 +525,16 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { nonexistentProfile := "lis" rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", nonexistentProfile)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } var profileJSON map[string][]map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &profileJSON) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } for profileK := range profileJSON { for _, p := range profileJSON[profileK] { @@ -550,7 +550,7 @@ func validateProfileCmd(ctx context.Context, t 
*testing.T, profile string) { // List profiles rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list")) if err != nil { - t.Errorf("failed to list profiles: args %q : %v", rr.Args, err) + t.Errorf("failed to list profiles: args %q : %v", rr.Command(), err) } // Table output @@ -564,7 +564,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { } } if !profileExists { - t.Errorf("expected 'profile list' output to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Args) + t.Errorf("expected 'profile list' output to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Command()) } }) @@ -572,12 +572,12 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { // Json output rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json")) if err != nil { - t.Errorf("failed to list profiles with json format. args %q: %v", rr.Args, err) + t.Errorf("failed to list profiles with json format. args %q: %v", rr.Command(), err) } var jsonObject map[string][]map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) if err != nil { - t.Errorf("failed to decode json from profile list: args %q: %v", rr.Args, err) + t.Errorf("failed to decode json from profile list: args %q: %v", rr.Command(), err) } validProfiles := jsonObject["valid"] profileExists := false @@ -588,7 +588,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { } } if !profileExists { - t.Errorf("expected the json of 'profile list' to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Args) + t.Errorf("expected the json of 'profile list' to include %q but got *%q*. 
args: %q", profile, rr.Stdout.String(), rr.Command()) } }) @@ -598,11 +598,11 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) { func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node")) if err != nil { - t.Logf("%q failed: %v (may not be an error).", rr.Args, err) + t.Logf("%q failed: %v (may not be an error).", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "expose", "deployment", "hello-node", "--type=NodePort", "--port=8080")) if err != nil { - t.Logf("%q failed: %v (may not be an error)", rr.Args, err) + t.Logf("%q failed: %v (may not be an error)", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "app=hello-node", Minutes(10)); err != nil { @@ -611,7 +611,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "list")) if err != nil { - t.Errorf("failed to do service list. args %q : %v", rr.Args, err) + t.Errorf("failed to do service list. args %q : %v", rr.Command(), err) } if !strings.Contains(rr.Stdout.String(), "hello-node") { t.Errorf("expected 'service list' to contain *hello-node* but got -%q-", rr.Stdout.String()) @@ -620,7 +620,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { // Test --https --url mode rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "--namespace=default", "--https", "--url", "hello-node")) if err != nil { - t.Fatalf("failed to get service url. args %q : %v", rr.Args, err) + t.Fatalf("failed to get service url. 
args %q : %v", rr.Command(), err) } if rr.Stderr.String() != "" { t.Errorf("expected stderr to be empty but got *%q*", rr.Stderr) @@ -638,16 +638,16 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) { // Test --format=IP rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url", "--format={{.IP}}")) if err != nil { - t.Errorf("failed to get service url with custom format. args %q: %v", rr.Args, err) + t.Errorf("failed to get service url with custom format. args %q: %v", rr.Command(), err) } if strings.TrimSpace(rr.Stdout.String()) != u.Hostname() { - t.Errorf("expected 'service --format={{.IP}}' output to be -%q- but got *%q* . args %q.", u.Hostname(), rr.Stdout.String(), rr.Args) + t.Errorf("expected 'service --format={{.IP}}' output to be -%q- but got *%q* . args %q.", u.Hostname(), rr.Stdout.String(), rr.Command()) } // Test a regular URLminikube rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url")) if err != nil { - t.Errorf("failed to get service url. args: %q: %v", rr.Args, err) + t.Errorf("failed to get service url. args: %q: %v", rr.Command(), err) } endpoint = strings.TrimSpace(rr.Stdout.String()) @@ -674,7 +674,7 @@ func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) { // Table output rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list")) if err != nil { - t.Errorf("failed to do addon list: args %q : %v", rr.Args, err) + t.Errorf("failed to do addon list: args %q : %v", rr.Command(), err) } for _, a := range []string{"dashboard", "ingress", "ingress-dns"} { if !strings.Contains(rr.Output(), a) { @@ -685,7 +685,7 @@ func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) { // Json output rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list", "-o", "json")) if err != nil { - t.Errorf("failed to do addon list with json output. 
args %q: %v", rr.Args, err) + t.Errorf("failed to do addon list with json output. args %q: %v", rr.Command(), err) } var jsonObject map[string]interface{} err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject) @@ -702,10 +702,10 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { want := "hello\n" rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("echo hello"))) if err != nil { - t.Errorf("failed to run an ssh command. args %q : %v", rr.Args, err) + t.Errorf("failed to run an ssh command. args %q : %v", rr.Command(), err) } if rr.Stdout.String() != want { - t.Errorf("expected minikube ssh command output to be -%q- but got *%q*. args %q", want, rr.Stdout.String(), rr.Args) + t.Errorf("expected minikube ssh command output to be -%q- but got *%q*. args %q", want, rr.Stdout.String(), rr.Command()) } } @@ -713,7 +713,7 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) { func validateMySQL(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "mysql.yaml"))) if err != nil { - t.Fatalf("failed to kubectl replace mysql: args %q failed: %v", rr.Args, err) + t.Fatalf("failed to kubectl replace mysql: args %q failed: %v", rr.Command(), err) } names, err := PodWait(ctx, t, profile, "default", "app=mysql", Minutes(10)) @@ -776,7 +776,7 @@ func validateFileSync(ctx context.Context, t *testing.T, profile string) { t.Logf("Checking for existence of %s within VM", vp) rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp))) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } got := rr.Stdout.String() t.Logf("file sync test content: %s", got) @@ -813,7 +813,7 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { t.Logf("Checking for existence of %s 
within VM", vp) rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp))) if err != nil { - t.Errorf("failed to check existence of %q inside minikube. args %q: %v", vp, rr.Args, err) + t.Errorf("failed to check existence of %q inside minikube. args %q: %v", vp, rr.Command(), err) } // Strip carriage returned by ssh @@ -828,7 +828,7 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) { func validateUpdateContextCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "update-context", "--alsologtostderr", "-v=2")) if err != nil { - t.Errorf("failed to run minikube update-context: args %q: %v", rr.Args, err) + t.Errorf("failed to run minikube update-context: args %q: %v", rr.Command(), err) } want := []byte("IP was already correctly configured") diff --git a/test/integration/guest_env_test.go b/test/integration/guest_env_test.go index e59284df60..201d188ec2 100644 --- a/test/integration/guest_env_test.go +++ b/test/integration/guest_env_test.go @@ -38,7 +38,7 @@ func TestGuestEnvironment(t *testing.T) { args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=1800", "--wait=false"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("failed to start minikube: args %q: %v", rr.Args, err) + t.Errorf("failed to start minikube: args %q: %v", rr.Command(), err) } // Run as a group so that our defer doesn't happen as tests are runnings @@ -49,7 +49,7 @@ func TestGuestEnvironment(t *testing.T) { t.Parallel() rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("which %s", pkg))) if err != nil { - t.Errorf("failed to verify existance of %q binary : args %q: %v", pkg, rr.Args, err) + t.Errorf("failed to verify existance of %q binary : args %q: %v", pkg, rr.Command(), err) } }) } @@ -70,7 +70,7 @@ func TestGuestEnvironment(t *testing.T) { t.Parallel() rr, err := Run(t, exec.CommandContext(ctx, Targt(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount))) if err != nil { - t.Errorf("failed to verify existance of %q mount. args %q: %v", mount, rr.Args, err) + t.Errorf("failed to verify existance of %q mount. args %q: %v", mount, rr.Command(), err) } }) } diff --git a/test/integration/gvisor_addon_test.go b/test/integration/gvisor_addon_test.go index 6e20c249c6..d5744eeafe 100644 --- a/test/integration/gvisor_addon_test.go +++ b/test/integration/gvisor_addon_test.go @@ -50,18 +50,18 @@ func TestGvisorAddon(t *testing.T) { startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--container-runtime=containerd", "--docker-opt", "containerd=/var/run/containerd/containerd.sock"}, StartArgs()...) 
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("failed to start minikube: args %q: %v", rr.Args, err) + t.Fatalf("failed to start minikube: args %q: %v", rr.Command(), err) } // If it exists, include a locally built gvisor image rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", "gcr.io/k8s-minikube/gvisor-addon:2")) if err != nil { - t.Logf("%s failed: %v (won't test local image)", rr.Args, err) + t.Logf("%s failed: %v (won't test local image)", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "enable", "gvisor")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil { @@ -71,12 +71,12 @@ func TestGvisorAddon(t *testing.T) { // Create an untrusted workload rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-untrusted.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // Create gvisor workload rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-gvisor.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil { @@ -89,12 +89,12 @@ func TestGvisorAddon(t *testing.T) { // Ensure that workloads survive a restart rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile)) if err != nil { - t.Fatalf("faild stopping minikube. args %q : %v", rr.Args, err) + t.Fatalf("faild stopping minikube. 
args %q : %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("failed starting minikube after a stop. args %q, %v", rr.Args, err) + t.Fatalf("failed starting minikube after a stop. args %q, %v", rr.Command(), err) } if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil { t.Errorf("failed waiting for 'gvisor controller' pod : %v", err) diff --git a/test/integration/none_test.go b/test/integration/none_test.go index 873465d5ef..ed77814dee 100644 --- a/test/integration/none_test.go +++ b/test/integration/none_test.go @@ -46,22 +46,22 @@ func TestChangeNoneUser(t *testing.T) { startArgs := append([]string{"CHANGE_MINIKUBE_NONE_USER=true", Target(), "start", "--wait=false"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, "/usr/bin/env", startArgs...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "/usr/bin/env", startArgs...)) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "status")) if err != nil { - t.Errorf("%s failed: %v", rr.Args, err) + t.Errorf("%s failed: %v", rr.Command(), err) } username := os.Getenv("SUDO_USER") diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index 21ca1d57ed..dd106484c9 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -92,7 +92,7 @@ func TestStartStop(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("failed starting minikube -first start-. 
args %q: %v", rr.Args, err) + t.Fatalf("failed starting minikube -first start-. args %q: %v", rr.Command(), err) } if !strings.Contains(tc.name, "cni") { @@ -101,7 +101,7 @@ func TestStartStop(t *testing.T) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3")) if err != nil { - t.Errorf("failed stopping minikube - first stop-. args %q : %v", rr.Args, err) + t.Errorf("failed stopping minikube - first stop-. args %q : %v", rr.Command(), err) } // The none driver never really stops @@ -115,13 +115,13 @@ func TestStartStop(t *testing.T) { // Enable an addon to assert it comes up afterwards rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile)) if err != nil { - t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Args, err) + t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { // Explicit fatal so that failures don't move directly to deletion - t.Fatalf("failed to start minikube post-stop. args %q: %v", rr.Args, err) + t.Fatalf("failed to start minikube post-stop. 
args %q: %v", rr.Command(), err) } if strings.Contains(tc.name, "cni") { @@ -150,7 +150,7 @@ func TestStartStop(t *testing.T) { // Normally handled by cleanuprofile, but not fatal there rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) if err != nil { - t.Errorf("failed to clean up: args %q: %v", rr.Args, err) + t.Errorf("failed to clean up: args %q: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "config", "get-contexts", profile)) @@ -182,14 +182,14 @@ func TestStartStopWithPreload(t *testing.T) { rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // Now, pull the busybox image into the VMs docker daemon image := "busybox" rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "pull", image)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // Restart minikube with v1.17.3, which has a preloaded tarball @@ -199,11 +199,11 @@ func TestStartStopWithPreload(t *testing.T) { startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion)) rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } if !strings.Contains(rr.Output(), image) { t.Fatalf("Expected to find %s in output of `docker images`, instead got %s", image, rr.Output()) @@ -217,7 +217,7 @@ func testPodScheduling(ctx context.Context, t *testing.T, profile string) { // schedule a pod to assert persistence rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", 
filepath.Join(*testdataDir, "busybox.yaml"))) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } // 8 minutes, because 4 is not enough for images to pull in all cases. @@ -250,7 +250,7 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "sudo crictl images -o json")) if err != nil { - t.Errorf("failed tp get images inside minikube. args %q: %v", rr.Args, err) + t.Errorf("failed tp get images inside minikube. args %q: %v", rr.Command(), err) } jv := map[string][]struct { Tags []string `json:"repoTags"` @@ -293,7 +293,7 @@ func testPause(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "pause", "-p", profile, "--alsologtostderr", "-v=1")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } got := Status(ctx, t, Target(), profile, "APIServer") @@ -308,7 +308,7 @@ func testPause(ctx context.Context, t *testing.T, profile string) { rr, err = Run(t, exec.CommandContext(ctx, Target(), "unpause", "-p", profile, "--alsologtostderr", "-v=1")) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } got = Status(ctx, t, Target(), profile, "APIServer") diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go index 20d131d5ef..2eb4eaabb6 100644 --- a/test/integration/version_upgrade_test.go +++ b/test/integration/version_upgrade_test.go @@ -82,7 +82,7 @@ func TestVersionUpgrade(t *testing.T) { rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "stop", "-p", profile)) if err != nil { - t.Fatalf("%s failed: %v", rr.Args, err) + t.Fatalf("%s failed: %v", rr.Command(), err) } rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "-p", profile, "status", "--format={{.Host}}")) @@ -97,7 +97,7 @@ func TestVersionUpgrade(t 
*testing.T) { args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("failed to start minikube HEAD with newest k8s version. args: %s : %v", rr.Args, err) + t.Errorf("failed to start minikube HEAD with newest k8s version. args: %s : %v", rr.Command(), err) } s, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "version", "--output=json")) @@ -121,12 +121,12 @@ func TestVersionUpgrade(t *testing.T) { args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) if rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), args...)); err == nil { - t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Args) + t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Command()) } args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("start and already started minikube failed. args: %q : %v", rr.Args, err) + t.Errorf("start and already started minikube failed. 
args: %q : %v", rr.Command(), err) } } From fffac252628281898b757119d3d88e0978c0deea Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 22:45:29 -0700 Subject: [PATCH 37/63] indent test outputs --- test/integration/helpers.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/test/integration/helpers.go b/test/integration/helpers.go index bac683ea87..02298b3a8e 100644 --- a/test/integration/helpers.go +++ b/test/integration/helpers.go @@ -63,14 +63,26 @@ func (rr RunResult) Command() string { return sb.String() } +// indentLines indents every line in a bytes.Buffer and returns it as string +func indentLines(b *bytes.Buffer) string { + scanner := bufio.NewScanner(b) + var lines string + for scanner.Scan() { + lines = lines + "\t" + scanner.Text() + "\n" + } + return lines +} + // Output returns human-readable output for an execution result func (rr RunResult) Output() string { var sb strings.Builder + if rr.Stdout.Len() > 0 { - sb.WriteString(fmt.Sprintf("-- stdout --\n%s\n-- /stdout --", rr.Stdout.Bytes())) + sb.WriteString(fmt.Sprintf("-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout))) } + if rr.Stderr.Len() > 0 { - sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", rr.Stderr.Bytes())) + sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", indentLines(rr.Stderr))) } return sb.String() } From 2ced39c74561b3e327e11263817e3b71010ca5fa Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 22:54:43 -0700 Subject: [PATCH 38/63] add \n for stdout too --- test/integration/helpers.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/integration/helpers.go b/test/integration/helpers.go index 02298b3a8e..8ac021862a 100644 --- a/test/integration/helpers.go +++ b/test/integration/helpers.go @@ -76,11 +76,9 @@ func indentLines(b *bytes.Buffer) string { // Output returns human-readable output for an execution result func (rr RunResult) Output() string { var sb 
strings.Builder - if rr.Stdout.Len() > 0 { - sb.WriteString(fmt.Sprintf("-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout))) + sb.WriteString(fmt.Sprintf("\n-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout))) } - if rr.Stderr.Len() > 0 { sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", indentLines(rr.Stderr))) } From 0dc9c2af70f5df7024740ef515395ce41e4b6109 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 23:14:10 -0700 Subject: [PATCH 39/63] install kubectl on github action machines --- .github/workflows/main.yml | 48 +++++++++++++++++++++++++++++++++----- 1 file changed, 42 insertions(+), 6 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4a0bd5b0aa..d5feef452d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -81,13 +81,26 @@ jobs: GOPOGH_RESULT: "" SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 - steps: + steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl - name: Docker Info shell: bash run: | - docker info || true + echo "--------------------------" docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" docker ps || true + echo "--------------------------" - name: Install lz4 shell: bash run: | @@ -157,6 +170,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 needs: [build_minikube] steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl - name: Install lz4 shell: bash run: 
| @@ -165,9 +183,17 @@ jobs: - name: Docker Info shell: bash run: | - docker info || true + echo "--------------------------" docker version || true + echo "--------------------------" + docker info || true + echo "--------------------------" + docker system df || true + echo "--------------------------" + docker system info || true + echo "--------------------------" docker ps || true + echo "--------------------------" - name: Install gopogh shell: bash run: | @@ -232,6 +258,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-16.04 steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -304,6 +335,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: + - name: Install kubectl + shell: bash + run: | + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -376,11 +412,11 @@ jobs: SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643 runs-on: ubuntu-18.04 steps: - - name: Install lz4 + - name: Install kubectl shell: bash run: | - sudo apt-get update -qq - sudo apt-get -qq -y install liblz4-tool + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + sudo install kubectl /usr/local/bin/kubectl - name: Install podman shell: bash run: | From 
9229b35da97211504477fb8356e07fdb36db200c Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 25 Mar 2020 23:34:05 -0700 Subject: [PATCH 40/63] print kubectl version to make sure it is installed --- .github/workflows/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d5feef452d..0fb9d83185 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -87,6 +87,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl + kubectl version - name: Docker Info shell: bash run: | @@ -175,6 +176,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl + kubectl version - name: Install lz4 shell: bash run: | @@ -263,6 +265,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl + kubectl version # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -340,6 +343,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl + kubectl version # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -417,6 +421,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl + kubectl version - name: Install podman shell: bash run: | From d2a7b8b748d10bd1ea64670d7853804e3504b76f Mon Sep 17 00:00:00 2001 From: Medya Gh Date: 
Wed, 25 Mar 2020 23:45:27 -0700 Subject: [PATCH 41/63] kubectl version --- .github/workflows/main.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 0fb9d83185..1ce020cbbe 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -87,7 +87,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl - kubectl version + kubectl version --client=true - name: Docker Info shell: bash run: | @@ -176,7 +176,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl - kubectl version + kubectl version --client=true - name: Install lz4 shell: bash run: | @@ -265,7 +265,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl - kubectl version + kubectl version --client=true # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -343,7 +343,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl - kubectl version + kubectl version --client=true # conntrack is required for kubernetes 1.18 and higher # socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon - name: Install tools for none @@ -421,7 +421,7 @@ jobs: run: | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl sudo install kubectl /usr/local/bin/kubectl - kubectl version + kubectl version --client=true - name: Install podman shell: bash run: | From 
1c8d5806825ecbaeeb752080fd89824450699815 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:14:07 -0700 Subject: [PATCH 42/63] add docs for docker driver --- .../en/docs/Reference/Drivers/docker.md | 21 ++++-------- .../en/docs/Reference/Drivers/hyperv.md | 1 - .../Drivers/includes/check_baremetal.inc | 7 ++++ .../Drivers/includes/check_container.inc | 9 ++++++ .../includes/check_virtualization_linux.inc | 11 +++++++ .../includes/check_virtualization_windows.inc | 19 +++++++++++ .../Drivers/includes/docker_usage.inc | 16 ++++++++++ .../content/en/docs/Reference/Drivers/kvm2.md | 5 +++ site/content/en/docs/Start/linux.md | 28 +++++++++------- site/content/en/docs/Start/macos.md | 3 ++ site/content/en/docs/Start/windows.md | 32 ++++++------------- 11 files changed, 102 insertions(+), 50 deletions(-) create mode 100644 site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc create mode 100644 site/content/en/docs/Reference/Drivers/includes/check_container.inc create mode 100644 site/content/en/docs/Reference/Drivers/includes/check_virtualization_linux.inc create mode 100644 site/content/en/docs/Reference/Drivers/includes/check_virtualization_windows.inc create mode 100644 site/content/en/docs/Reference/Drivers/includes/docker_usage.inc diff --git a/site/content/en/docs/Reference/Drivers/docker.md b/site/content/en/docs/Reference/Drivers/docker.md index f44261fbad..2fcd751123 100644 --- a/site/content/en/docs/Reference/Drivers/docker.md +++ b/site/content/en/docs/Reference/Drivers/docker.md @@ -4,28 +4,19 @@ linkTitle: "docker" weight: 3 date: 2020-02-05 description: > - Docker driver (EXPERIMENTAL) + Docker driver --- ## Overview -The Docker driver is an experimental VM-free driver that ships with minikube v1.7. +The Docker driver is a VM-free driver. 
+ +{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} -This driver was inspired by the [kind project](https://kind.sigs.k8s.io/), and uses a modified version of its base image. ## Special features -No hypervisor required when run on Linux. +- Cross platform (linux, macos, windows) +- No hypervisor required when run on Linux. -## Limitations -As an experimental driver, not all commands are supported on all platforms. Notably: `mount,` `service`, `tunnel`, and others. Most of these limitations will be addressed by minikube v1.8 (March 2020) - -## Issues - -* [Full list of open 'kic-driver' issues](https://github.com/kubernetes/minikube/labels/co%2Fkic-driver) - -## Troubleshooting - -* Run `minikube start --alsologtostderr -v=1` to debug crashes -* If your docker is too slow on mac os try [Improving docker performance](https://docs.docker.com/docker-for-mac/osxfs-caching/) diff --git a/site/content/en/docs/Reference/Drivers/hyperv.md b/site/content/en/docs/Reference/Drivers/hyperv.md index 909f1e03f8..3595a2bb9d 100644 --- a/site/content/en/docs/Reference/Drivers/hyperv.md +++ b/site/content/en/docs/Reference/Drivers/hyperv.md @@ -7,7 +7,6 @@ date: 2018-08-05 description: > Microsoft Hyper-V driver --- - ## Overview Hyper-V is a native hypervisor built in to modern versions of Microsoft Windows. diff --git a/site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc b/site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc new file mode 100644 index 0000000000..da6797ff8c --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc @@ -0,0 +1,7 @@ +To use baremetal driver (none driver). verify that your operating system is Linux and also have 'systemd' installed. + +```shell +pidof systemd && echo "yes" || echo "no" +``` +If the above command outputs "no": +Your system is not suitable for none driver. 
\ No newline at end of file diff --git a/site/content/en/docs/Reference/Drivers/includes/check_container.inc b/site/content/en/docs/Reference/Drivers/includes/check_container.inc new file mode 100644 index 0000000000..c189d8a2ac --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/check_container.inc @@ -0,0 +1,9 @@ +To use container drivers, verify that your system has either have 'docker' or 'podman' installed. +```shell +docker version +``` +or + +```shell +podman version +``` \ No newline at end of file diff --git a/site/content/en/docs/Reference/Drivers/includes/check_virtualization_linux.inc b/site/content/en/docs/Reference/Drivers/includes/check_virtualization_linux.inc new file mode 100644 index 0000000000..3f60068016 --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/check_virtualization_linux.inc @@ -0,0 +1,11 @@ +To use VM drivers, verify that your system has virtualization support enabled: + +```shell +egrep -q 'vmx|svm' /proc/cpuinfo && echo yes || echo no +``` + +If the above command outputs "no": + +- If you are running within a VM, your hypervisor does not allow nested virtualization. You will need to use the *None (bare-metal)* driver +- If you are running on a physical machine, ensure that your BIOS has hardware virtualization enabled + diff --git a/site/content/en/docs/Reference/Drivers/includes/check_virtualization_windows.inc b/site/content/en/docs/Reference/Drivers/includes/check_virtualization_windows.inc new file mode 100644 index 0000000000..14812b61ec --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/check_virtualization_windows.inc @@ -0,0 +1,19 @@ +To check if virtualization is supported, run the following command on your Windows terminal or command prompt. 
+ +```shell +systeminfo +``` +If you see the following output, virtualization is supported: + +```shell +Hyper-V Requirements: VM Monitor Mode Extensions: Yes + Virtualization Enabled In Firmware: Yes + Second Level Address Translation: Yes + Data Execution Prevention Available: Yes +``` + +If you see the following output, your system already has a Hypervisor installed and you can skip the next step. + +```shell +Hyper-V Requirements: A hypervisor has been detected. +``` \ No newline at end of file diff --git a/site/content/en/docs/Reference/Drivers/includes/docker_usage.inc b/site/content/en/docs/Reference/Drivers/includes/docker_usage.inc new file mode 100644 index 0000000000..df96d517ec --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/docker_usage.inc @@ -0,0 +1,16 @@ +## Install Docker + +- [Docker Desktop](https://hub.docker.com/search?q=&type=edition&offering=community&sort=updated_at&order=desc) + +## Usage + +Start a cluster using the docker driver: + +```shell +minikube start --driver=docker +``` +To make docker the default driver: + +```shell +minikube config set driver docker +``` diff --git a/site/content/en/docs/Reference/Drivers/kvm2.md b/site/content/en/docs/Reference/Drivers/kvm2.md index df13a3f95d..7e7c80a081 100644 --- a/site/content/en/docs/Reference/Drivers/kvm2.md +++ b/site/content/en/docs/Reference/Drivers/kvm2.md @@ -8,12 +8,17 @@ description: > Linux KVM (Kernel-based Virtual Machine) driver --- + ## Overview [KVM (Kernel-based Virtual Machine)](https://www.linux-kvm.org/page/Main_Page) is a full virtualization solution for Linux on x86 hardware containing virtualization extensions. 
To work with KVM, minikube uses the [libvirt virtualization API](https://libvirt.org/) {{% readfile file="/docs/Reference/Drivers/includes/kvm2_usage.inc" %}} +## Check virtualization support + +{{% readfile file="/docs/Reference/Drivers/includes/virtualization_check_linux.inc" %}} + ## Special features The `minikube start` command supports 3 additional kvm specific flags: diff --git a/site/content/en/docs/Start/linux.md b/site/content/en/docs/Start/linux.md index b52b1690d8..8534cff5e2 100644 --- a/site/content/en/docs/Start/linux.md +++ b/site/content/en/docs/Start/linux.md @@ -39,28 +39,32 @@ curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-{{< la {{% /tab %}} {{% /tabs %}} -## Hypervisor Setup - -Verify that your system has virtualization support enabled: - -```shell -egrep -q 'vmx|svm' /proc/cpuinfo && echo yes || echo no -``` - -If the above command outputs "no": - -- If you are running within a VM, your hypervisor does not allow nested virtualization. You will need to use the *None (bare-metal)* driver -- If you are running on a physical machine, ensure that your BIOS has hardware virtualization enabled +## Driver Setup {{% tabs %}} +{{% tab "Docker" %}} +## Check container support +{{% readfile file="/docs/Reference/Drivers/includes/check_container.inc" %}} + +{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} +{{% /tab %}} {{% tab "KVM" %}} +## Check virtualization support +{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}} + {{% readfile file="/docs/Reference/Drivers/includes/kvm2_usage.inc" %}} {{% /tab %}} {{% tab "VirtualBox" %}} +## Check virtualization support +{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}} + {{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}} {{% /tab %}} {{% tab "None (bare-metal)" %}} +## Check baremetal support +{{% readfile 
file="/docs/Reference/Drivers/includes/check_baremetal_linux.inc" %}} + If you are already running minikube from inside a VM, it is possible to skip the creation of an additional VM layer by using the `none` driver. {{% readfile file="/docs/Reference/Drivers/includes/none_usage.inc" %}} diff --git a/site/content/en/docs/Start/macos.md b/site/content/en/docs/Start/macos.md index 3c41e3a9b9..1051d8a1a1 100644 --- a/site/content/en/docs/Start/macos.md +++ b/site/content/en/docs/Start/macos.md @@ -50,6 +50,9 @@ brew upgrade minikube ## Hypervisor Setup {{% tabs %}} +{{% tab "Docker" %}} +{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} +{{% /tab %}} {{% tab "Hyperkit" %}} {{% readfile file="/docs/Reference/Drivers/includes/hyperkit_usage.inc" %}} {{% /tab %}} diff --git a/site/content/en/docs/Start/windows.md b/site/content/en/docs/Start/windows.md index 02ff403f99..2fe1d79a5f 100644 --- a/site/content/en/docs/Start/windows.md +++ b/site/content/en/docs/Start/windows.md @@ -7,8 +7,6 @@ weight: 3 ### Prerequisites * Windows 8 or above -* A hypervisor, such as Hyper-V or VirtualBox -* Hardware virtualization support must be enabled in BIOS * 4GB of RAM ### Installation @@ -30,33 +28,23 @@ After it has installed, close the current CLI session and reopen it. minikube sh {{% /tab %}} {{% /tabs %}} -## Hypervisor Setup -To check if virtualization is supported, run the following command on your Windows terminal or command prompt. - -```shell -systeminfo -``` -If you see the following output, virtualization is supported: - -```shell -Hyper-V Requirements: VM Monitor Mode Extensions: Yes - Virtualization Enabled In Firmware: Yes - Second Level Address Translation: Yes - Data Execution Prevention Available: Yes -``` - -If you see the following output, your system already has a Hypervisor installed and you can skip the next step. - -```shell -Hyper-V Requirements: A hypervisor has been detected. 
-``` {{% tabs %}} +{{% tab "Docker" %}} +{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} +{{% /tab %}} + {{% tab "Hyper-V" %}} +## Check Hypervisor +{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_windows.inc" %}} + {{% readfile file="/docs/Reference/Drivers/includes/hyperv_usage.inc" %}} {{% /tab %}} {{% tab "VirtualBox" %}} +## Check Hypervisor +{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_windows.inc" %}} + {{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}} {{% /tab %}} {{% /tabs %}} From 4f2d3de0552ca435d868f7aafbc15a3b37ae86dd Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:16:16 -0700 Subject: [PATCH 43/63] rename inc page --- .../includes/{check_baremetal.inc => check_baremetal_linux.inc} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename site/content/en/docs/Reference/Drivers/includes/{check_baremetal.inc => check_baremetal_linux.inc} (100%) diff --git a/site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc b/site/content/en/docs/Reference/Drivers/includes/check_baremetal_linux.inc similarity index 100% rename from site/content/en/docs/Reference/Drivers/includes/check_baremetal.inc rename to site/content/en/docs/Reference/Drivers/includes/check_baremetal_linux.inc From 6006e1416f49096ac859d9a64cc8a19b12b0908a Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:17:52 -0700 Subject: [PATCH 44/63] rename broken link --- site/content/en/docs/Reference/Drivers/kvm2.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/site/content/en/docs/Reference/Drivers/kvm2.md b/site/content/en/docs/Reference/Drivers/kvm2.md index 7e7c80a081..52102bb83e 100644 --- a/site/content/en/docs/Reference/Drivers/kvm2.md +++ b/site/content/en/docs/Reference/Drivers/kvm2.md @@ -17,7 +17,7 @@ description: > ## Check virtualization support -{{% readfile 
file="/docs/Reference/Drivers/includes/virtualization_check_linux.inc" %}} +{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}} ## Special features From 7d9d62bef690f13cc4055f14c7c39952c0690283 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:21:30 -0700 Subject: [PATCH 45/63] fix the heading sizer --- .../docs/Reference/Drivers/includes/check_container.inc | 9 --------- site/content/en/docs/Start/includes/post_install.inc | 2 +- site/content/en/docs/Start/linux.md | 2 -- 3 files changed, 1 insertion(+), 12 deletions(-) delete mode 100644 site/content/en/docs/Reference/Drivers/includes/check_container.inc diff --git a/site/content/en/docs/Reference/Drivers/includes/check_container.inc b/site/content/en/docs/Reference/Drivers/includes/check_container.inc deleted file mode 100644 index c189d8a2ac..0000000000 --- a/site/content/en/docs/Reference/Drivers/includes/check_container.inc +++ /dev/null @@ -1,9 +0,0 @@ -To use container drivers, verify that your system has either have 'docker' or 'podman' installed. -```shell -docker version -``` -or - -```shell -podman version -``` \ No newline at end of file diff --git a/site/content/en/docs/Start/includes/post_install.inc b/site/content/en/docs/Start/includes/post_install.inc index 5f28d307cf..595eda57d8 100644 --- a/site/content/en/docs/Start/includes/post_install.inc +++ b/site/content/en/docs/Start/includes/post_install.inc @@ -1,4 +1,4 @@ -### Getting to know Kubernetes +## Getting to know Kubernetes Once started, you can use any regular Kubernetes command to interact with your minikube cluster. 
For example, you can see the pod states by running: diff --git a/site/content/en/docs/Start/linux.md b/site/content/en/docs/Start/linux.md index 8534cff5e2..454c7c4fe5 100644 --- a/site/content/en/docs/Start/linux.md +++ b/site/content/en/docs/Start/linux.md @@ -44,8 +44,6 @@ curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-{{< la {{% tabs %}} {{% tab "Docker" %}} ## Check container support -{{% readfile file="/docs/Reference/Drivers/includes/check_container.inc" %}} - {{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} {{% /tab %}} From 6654d4e033fbfa357a2b66622d2d46fb9293e531 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:27:23 -0700 Subject: [PATCH 46/63] update docs about memory auto select --- site/content/en/docs/Start/includes/post_install.inc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/site/content/en/docs/Start/includes/post_install.inc b/site/content/en/docs/Start/includes/post_install.inc index 595eda57d8..03ed41bb61 100644 --- a/site/content/en/docs/Start/includes/post_install.inc +++ b/site/content/en/docs/Start/includes/post_install.inc @@ -6,16 +6,16 @@ Once started, you can use any regular Kubernetes command to interact with your m kubectl get po -A ``` -### Increasing memory allocation +## Increasing memory allocation -minikube only allocates 2GB of RAM by default, which is only enough for trivial deployments. For larger +minikube auto-selects the memory size based on your system up to 6000mb. For larger deployments, increase the memory allocation using the `--memory` flag, or make the setting persistent using: ```shell -minikube config set memory 4096 +minikube config set memory 8096 ``` -### Where to go next? +## Where to go next? Visit the [examples](/docs/examples) page to get an idea of what you can do with minikube. 
From 844e1a083dfdf387fb05d829e5b95aaf9419df38 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:41:20 -0700 Subject: [PATCH 47/63] add podman driver docs --- .../en/docs/Reference/Drivers/docker.md | 3 +-- .../Drivers/includes/podman_usage.inc | 16 ++++++++++++ .../en/docs/Reference/Drivers/podman.md | 26 +++++++++++++++++++ site/content/en/docs/Start/linux.md | 5 ++++ 4 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 site/content/en/docs/Reference/Drivers/includes/podman_usage.inc create mode 100644 site/content/en/docs/Reference/Drivers/podman.md diff --git a/site/content/en/docs/Reference/Drivers/docker.md b/site/content/en/docs/Reference/Drivers/docker.md index 2fcd751123..17f3782504 100644 --- a/site/content/en/docs/Reference/Drivers/docker.md +++ b/site/content/en/docs/Reference/Drivers/docker.md @@ -9,13 +9,12 @@ description: > ## Overview -The Docker driver is a VM-free driver. +The Docker driver is the newest minikube driver, which runs kubernetes in container with full feature parity with VM minikube. {{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} ## Special features - - Cross platform (linux, macos, windows) - No hypervisor required when run on Linux. 
diff --git a/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc new file mode 100644 index 0000000000..9327e9d901 --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc @@ -0,0 +1,16 @@ +## Install Podman + +- [Podman](https://podman.io/getting-started/installation.html) + +## Usage + +Start a cluster using the docker driver: + +```shell +minikube start --driver=podman +``` +To make docker the default driver: + +```shell +minikube config set driver podman +``` diff --git a/site/content/en/docs/Reference/Drivers/podman.md b/site/content/en/docs/Reference/Drivers/podman.md new file mode 100644 index 0000000000..7ef9657c1d --- /dev/null +++ b/site/content/en/docs/Reference/Drivers/podman.md @@ -0,0 +1,26 @@ +--- +title: "podman" +linkTitle: "podman" +weight: 3 +date: 2020-03-26 +description: > + Podman driver +--- + +## Overview + +The podman driver is another kubernetes in container driver for minikube. simmilar to [docker](docs/reference/drivers/docker) driver. +podman driver is currently experimental. +and only supported on Linux and MacOs (with a remote podman server) + + +## try with CRI-O run time. 
+```shell +minikube start --driver=podman --container-runtime=cri-o +``` + + +{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}} + + + diff --git a/site/content/en/docs/Start/linux.md b/site/content/en/docs/Start/linux.md index 454c7c4fe5..1e4fd4a9b1 100644 --- a/site/content/en/docs/Start/linux.md +++ b/site/content/en/docs/Start/linux.md @@ -67,6 +67,11 @@ If you are already running minikube from inside a VM, it is possible to skip the {{% readfile file="/docs/Reference/Drivers/includes/none_usage.inc" %}} {{% /tab %}} +{{% tab "Podman (experimental)" %}} +{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}} +{{% /tab %}} + + {{% /tabs %}} {{% readfile file="/docs/Start/includes/post_install.inc" %}} From 2635b4b0d759fc2cf24cfd1e0915b776da677780 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:44:48 -0700 Subject: [PATCH 48/63] fix wording --- site/content/en/docs/Reference/Drivers/docker.md | 2 +- .../en/docs/Reference/Drivers/includes/podman_usage.inc | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/site/content/en/docs/Reference/Drivers/docker.md b/site/content/en/docs/Reference/Drivers/docker.md index 17f3782504..afd0a970b7 100644 --- a/site/content/en/docs/Reference/Drivers/docker.md +++ b/site/content/en/docs/Reference/Drivers/docker.md @@ -9,7 +9,7 @@ description: > ## Overview -The Docker driver is the newest minikube driver, which runs kubernetes in container with full feature parity with VM minikube. +The Docker driver is the newest minikube driver. which runs kubernetes in container VM-free ! with full feature parity with minikube in VM. 
{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}} diff --git a/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc index 9327e9d901..76720f262a 100644 --- a/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc +++ b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc @@ -1,3 +1,8 @@ +## experimental + +This is an experimental driver. please use it only for experimental reasons. +for a better kubernetes in container experience, use docker [driver](https://5e7c6ab90d754e000860cdfd--kubernetes-sigs-minikube.netlify.com/docs/reference/drivers/docker/). + ## Install Podman - [Podman](https://podman.io/getting-started/installation.html) From b7742190ab45a94204f90a7f95e1722d3ff64662 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 01:46:56 -0700 Subject: [PATCH 49/63] podman wording --- site/content/en/docs/Reference/Drivers/podman.md | 2 +- site/content/en/docs/Start/macos.md | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/site/content/en/docs/Reference/Drivers/podman.md b/site/content/en/docs/Reference/Drivers/podman.md index 7ef9657c1d..2b83cf3ea6 100644 --- a/site/content/en/docs/Reference/Drivers/podman.md +++ b/site/content/en/docs/Reference/Drivers/podman.md @@ -14,7 +14,7 @@ podman driver is currently experimental. and only supported on Linux and MacOs (with a remote podman server) -## try with CRI-O run time. +## Try it with CRI-O container runtime. 
```shell minikube start --driver=podman --container-runtime=cri-o ``` diff --git a/site/content/en/docs/Start/macos.md b/site/content/en/docs/Start/macos.md index 1051d8a1a1..b23a81cf13 100644 --- a/site/content/en/docs/Start/macos.md +++ b/site/content/en/docs/Start/macos.md @@ -65,6 +65,9 @@ brew upgrade minikube {{% tab "VMware" %}} {{% readfile file="/docs/Reference/Drivers/includes/vmware_macos_usage.inc" %}} {{% /tab %}} +{{% tab "Podman (experimental)" %}} +{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}} +{{% /tab %}} {{% /tabs %}} From 26bdbfe11de823abb7c5c7da3d2aefc3aadf2ff5 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 02:01:57 -0700 Subject: [PATCH 50/63] update references to docker podman --- site/content/en/_index.html | 8 +++++--- site/content/en/docs/Contributing/triage.md | 1 + .../en/docs/Reference/Drivers/includes/podman_usage.inc | 2 +- site/content/en/docs/Reference/Drivers/podman.md | 2 +- site/content/en/docs/Reference/disk_cache.md | 2 +- site/content/en/docs/Reference/persistent_volumes.md | 2 +- site/content/en/docs/Tutorials/continuous_integration.md | 8 ++++++-- site/content/en/docs/Tutorials/nvidia_gpu.md | 2 +- .../en/docs/Tutorials/untrusted_root_certificate.md | 6 +++--- 9 files changed, 20 insertions(+), 13 deletions(-) diff --git a/site/content/en/_index.html b/site/content/en/_index.html index 8687b84c25..05ade99efe 100644 --- a/site/content/en/_index.html +++ b/site/content/en/_index.html @@ -83,12 +83,14 @@ A single command away from reproducing your production environment, from the com {{% /blocks/feature %}} {{% blocks/feature icon="fa-thumbs-up" title="Cross-platform" %}} -- Bare-metal -- HyperKit -- Hyper-V - KVM +- Docker +- HyperKit +- Bare-metal - VirtualBox +- Hyper-V - VMware +- Podman {{% /blocks/feature %}} {{< /blocks/section >}} diff --git a/site/content/en/docs/Contributing/triage.md b/site/content/en/docs/Contributing/triage.md index 318312107b..93b3c403e5 100644 --- 
a/site/content/en/docs/Contributing/triage.md +++ b/site/content/en/docs/Contributing/triage.md @@ -62,6 +62,7 @@ If the issue is specific to an operating system, hypervisor, container, addon, o - `co/kvm2` - `co/none-driver` - `co/docker-driver` + - `co/podman-driver` - `co/virtualbox` **co/[kubernetes component]** - When the issue appears specific to a k8s component diff --git a/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc index 76720f262a..9962043165 100644 --- a/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc +++ b/site/content/en/docs/Reference/Drivers/includes/podman_usage.inc @@ -1,7 +1,7 @@ ## experimental This is an experimental driver. please use it only for experimental reasons. -for a better kubernetes in container experience, use docker [driver](https://5e7c6ab90d754e000860cdfd--kubernetes-sigs-minikube.netlify.com/docs/reference/drivers/docker/). +for a better kubernetes in container experience, use docker [driver](https://minikube.sigs.k8s.io/docs/reference/drivers/docker). ## Install Podman diff --git a/site/content/en/docs/Reference/Drivers/podman.md b/site/content/en/docs/Reference/Drivers/podman.md index 2b83cf3ea6..17425b1e58 100644 --- a/site/content/en/docs/Reference/Drivers/podman.md +++ b/site/content/en/docs/Reference/Drivers/podman.md @@ -9,7 +9,7 @@ description: > ## Overview -The podman driver is another kubernetes in container driver for minikube. simmilar to [docker](docs/reference/drivers/docker) driver. +The podman driver is another kubernetes in container driver for minikube. simmilar to [docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker/) driver. podman driver is currently experimental. 
and only supported on Linux and MacOs (with a remote podman server) diff --git a/site/content/en/docs/Reference/disk_cache.md b/site/content/en/docs/Reference/disk_cache.md index 84d43112ef..24590727d5 100644 --- a/site/content/en/docs/Reference/disk_cache.md +++ b/site/content/en/docs/Reference/disk_cache.md @@ -4,7 +4,7 @@ linkTitle: "Disk cache" weight: 6 date: 2019-08-01 description: > - Cache Rules Everything Around Minikube + Cache Rules Everything Around minikube --- minikube has built-in support for caching downloaded resources into `$MINIKUBE_HOME/cache`. Here are the important file locations: diff --git a/site/content/en/docs/Reference/persistent_volumes.md b/site/content/en/docs/Reference/persistent_volumes.md index 6b7a38b83b..02e90bc703 100644 --- a/site/content/en/docs/Reference/persistent_volumes.md +++ b/site/content/en/docs/Reference/persistent_volumes.md @@ -7,7 +7,7 @@ description: > About persistent volumes (hostPath) --- -minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath` out of the box. These PersistentVolumes are mapped to a directory inside the running Minikube instance (usually a VM, unless you use `--driver=none`, `--driver=docker`, or `--driver=podman`). For more information on how this works, read the Dynamic Provisioning section below. +minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath` out of the box. These PersistentVolumes are mapped to a directory inside the running minikube instance (usually a VM, unless you use `--driver=none`, `--driver=docker`, or `--driver=podman`). For more information on how this works, read the Dynamic Provisioning section below. 
## A note on mounts, persistence, and minikube hosts diff --git a/site/content/en/docs/Tutorials/continuous_integration.md b/site/content/en/docs/Tutorials/continuous_integration.md index 4dad6cc9ed..1a55a58150 100644 --- a/site/content/en/docs/Tutorials/continuous_integration.md +++ b/site/content/en/docs/Tutorials/continuous_integration.md @@ -9,13 +9,13 @@ description: > ## Overview -Most continuous integration environments are already running inside a VM, and may not support nested virtualization. The `none` driver was designed for this use case. +Most continuous integration environments are already running inside a VM, and may not support nested virtualization. The `none` driver was designed for this use case. or you could alternatively use the [Docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker). ## Prerequisites - VM running a systemd based Linux distribution -## Tutorial +## using none driver Here is an example, that runs minikube from a non-root user, and ensures that the latest stable kubectl is installed: @@ -39,3 +39,7 @@ touch $KUBECONFIG sudo -E minikube start --driver=none ``` + +## Alternative ways + +you could alternatively use minikube's container drivers such as [Docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker) or [Podman](https://minikube.sigs.k8s.io/docs/reference/drivers/podman). \ No newline at end of file diff --git a/site/content/en/docs/Tutorials/nvidia_gpu.md b/site/content/en/docs/Tutorials/nvidia_gpu.md index 68846ae15b..4e9f561490 100644 --- a/site/content/en/docs/Tutorials/nvidia_gpu.md +++ b/site/content/en/docs/Tutorials/nvidia_gpu.md @@ -98,7 +98,7 @@ to expose GPUs with `--driver=kvm2`. Please don't mix these instructions. ## Why does minikube not support NVIDIA GPUs on macOS? 
-VM drivers supported by minikube for macOS doesn't support GPU passthrough: +drivers supported by minikube for macOS doesn't support GPU passthrough: - [mist64/xhyve#108](https://github.com/mist64/xhyve/issues/108) - [moby/hyperkit#159](https://github.com/moby/hyperkit/issues/159) diff --git a/site/content/en/docs/Tutorials/untrusted_root_certificate.md b/site/content/en/docs/Tutorials/untrusted_root_certificate.md index 77093d76f3..d1b857aa73 100644 --- a/site/content/en/docs/Tutorials/untrusted_root_certificate.md +++ b/site/content/en/docs/Tutorials/untrusted_root_certificate.md @@ -11,7 +11,7 @@ description: > Most organizations deploy their own Root Certificate and CA service inside the corporate networks. Internal websites, image repositories and other resources may install SSL server certificates issued by this CA service for security and privacy concerns. -You may install the Root Certificate into the minikube VM to access these corporate resources within the cluster. +You may install the Root Certificate into the minikube cluster to access these corporate resources within the cluster. 
## Prerequisites @@ -26,13 +26,13 @@ You may install the Root Certificate into the minikube VM to access these corpor openssl x509 -inform der -in my_company.cer -out my_company.pem ``` -* You may need to delete existing minikube VM +* You may need to delete existing minikube cluster ```shell minikube delete ``` -* Copy the certificate before creating the minikube VM +* Copy the certificate before creating the minikube cluster ```shell mkdir -p $HOME/.minikube/certs From 132d1459eb6de47e72a6f60bc95b443f9b2636d9 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 02:09:09 -0700 Subject: [PATCH 51/63] try local search for the site --- site/config.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/site/config.toml b/site/config.toml index 091e83fe83..23f8db88b6 100644 --- a/site/config.toml +++ b/site/config.toml @@ -112,7 +112,10 @@ github_project_repo = "" github_subdir = "site" # Google Custom Search Engine ID. Remove or comment out to disable search. -gcs_engine_id = "005331096405080631692:s7c4yfpw9sy" +# gcs_engine_id = "005331096405080631692:s7c4yfpw9sy" + +# enabling local search https://www.docsy.dev/docs/adding-content/navigation/#configure-local-search-with-lunr +offlineSearch = true # User interface configuration [params.ui] From 78c8d0245cb8c5a6dcea389f9bb3c88468e67736 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 02:45:01 -0700 Subject: [PATCH 52/63] update docsy theme submodule --- site/themes/docsy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/site/themes/docsy b/site/themes/docsy index 493bb1a0af..3123298f5b 160000 --- a/site/themes/docsy +++ b/site/themes/docsy @@ -1 +1 @@ -Subproject commit 493bb1a0af92d1242f8396aeb1661dcd3a010db7 +Subproject commit 3123298f5b0f56b3315b55319e17a8fa6c9d98f9 From 7e799c6be34eb434fc17e6de7fed4732b0d1275b Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 26 Mar 2020 03:00:56 -0700 Subject: [PATCH 53/63] add comment to Makefile for updating docsy --- 
Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f7b45a07ed..43bb6b9cf2 100755 --- a/Makefile +++ b/Makefile @@ -625,7 +625,7 @@ release-kvm-driver: install-kvm-driver checksum ## Release KVM Driver gsutil cp $(GOBIN)/docker-machine-driver-kvm2 gs://minikube/drivers/kvm/$(VERSION)/ gsutil cp $(GOBIN)/docker-machine-driver-kvm2.sha256 gs://minikube/drivers/kvm/$(VERSION)/ -site/themes/docsy/assets/vendor/bootstrap/package.js: +site/themes/docsy/assets/vendor/bootstrap/package.js: ## update the website docsy theme git submodule git submodule update -f --init --recursive out/hugo/hugo: From c73d4ca32b26facb62f7dee27630dc376ea86ced Mon Sep 17 00:00:00 2001 From: Yang Keao Date: Thu, 26 Mar 2020 18:51:25 +0800 Subject: [PATCH 54/63] add TBF and IPSET filter to kernel config Signed-off-by: Yang Keao --- .../iso/minikube-iso/board/coreos/minikube/linux_defconfig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig index ebf694f191..a766017e78 100644 --- a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig +++ b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig @@ -1,4 +1,5 @@ # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_KERNEL_LZ4=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y @@ -25,10 +26,10 @@ CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y CONFIG_BPF_SYSCALL=y -CONFIG_CGROUP_BPF=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_SMP=y @@ -270,12 +271,14 @@ CONFIG_BRIDGE_EBT_LOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_BRIDGE=m CONFIG_NET_SCHED=y +CONFIG_NET_SCH_TBF=y CONFIG_NET_SCH_NETEM=y CONFIG_NET_SCH_INGRESS=m CONFIG_NET_CLS_U32=m CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_IPSET=y CONFIG_NET_CLS_ACT=y 
CONFIG_NET_ACT_MIRRED=m CONFIG_NET_ACT_BPF=m From 3adfa8304c945773e91b07a249930743e3a29c60 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 06:30:20 -0700 Subject: [PATCH 55/63] Turn log message back into comment --- pkg/minikube/machine/fix.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/machine/fix.go b/pkg/minikube/machine/fix.go index 05cbb8eec5..9a96df2afb 100644 --- a/pkg/minikube/machine/fix.go +++ b/pkg/minikube/machine/fix.go @@ -77,8 +77,8 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host. return h, err } + // Avoid reprovisioning "none" driver because provision.Detect requires SSH if !driver.BareMetal(h.Driver.DriverName()) { - glog.Infof("%s is local, skipping re-provision as it requires SSH", driverName) e := engineOptions(cc) h.HostOptions.EngineOptions.Env = e.Env err = provisionDockerMachine(h) From d2c71b5363bc02b37f1e89ddc6f78a35e0d40512 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 08:11:39 -0700 Subject: [PATCH 56/63] Fix testing regression which broke stdout reads --- test/integration/helpers.go | 8 ++++---- test/integration/version_upgrade_test.go | 7 +++++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/test/integration/helpers.go b/test/integration/helpers.go index 8ac021862a..89eef10dc2 100644 --- a/test/integration/helpers.go +++ b/test/integration/helpers.go @@ -64,8 +64,8 @@ func (rr RunResult) Command() string { } // indentLines indents every line in a bytes.Buffer and returns it as string -func indentLines(b *bytes.Buffer) string { - scanner := bufio.NewScanner(b) +func indentLines(b []byte) string { + scanner := bufio.NewScanner(bytes.NewReader(b)) var lines string for scanner.Scan() { lines = lines + "\t" + scanner.Text() + "\n" @@ -77,10 +77,10 @@ func indentLines(b *bytes.Buffer) string { func (rr RunResult) Output() string { var sb strings.Builder if rr.Stdout.Len() > 0 { - 
sb.WriteString(fmt.Sprintf("\n-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout))) + sb.WriteString(fmt.Sprintf("\n-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout.Bytes()))) } if rr.Stderr.Len() > 0 { - sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", indentLines(rr.Stderr))) + sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", indentLines(rr.Stderr.Bytes()))) } return sb.String() } diff --git a/test/integration/version_upgrade_test.go b/test/integration/version_upgrade_test.go index 2eb4eaabb6..fa6a4d4653 100644 --- a/test/integration/version_upgrade_test.go +++ b/test/integration/version_upgrade_test.go @@ -89,9 +89,10 @@ func TestVersionUpgrade(t *testing.T) { if err != nil { t.Logf("status error: %v (may be ok)", err) } + got := strings.TrimSpace(rr.Stdout.String()) if got != state.Stopped.String() { - t.Errorf("status = %q; want = %q", got, state.Stopped.String()) + t.Errorf("FAILED: status = %q; want = %q", got, state.Stopped.String()) } args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) @@ -119,14 +120,16 @@ func TestVersionUpgrade(t *testing.T) { t.Fatalf("expected server version %s is not the same with latest version %s", cv.ServerVersion.GitVersion, constants.NewestKubernetesVersion) } + t.Logf("Attempting to downgrade Kubernetes (should fail)") args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) if rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), args...)); err == nil { t.Fatalf("downgrading kubernetes should not be allowed. 
expected to see error but got %v for %q", err, rr.Command()) } + t.Logf("Attempting restart after unsuccessful downgrade") args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...) rr, err = Run(t, exec.CommandContext(ctx, Target(), args...)) if err != nil { - t.Errorf("start and already started minikube failed. args: %q : %v", rr.Command(), err) + t.Errorf("start after failed upgrade: %v", err) } } From daffae3793b51921b8c74958a48c6446d1e7856b Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Thu, 26 Mar 2020 09:07:14 -0700 Subject: [PATCH 57/63] Wait for control-plane to upgrade before proceeding --- .../bootstrapper/bsutil/kverify/kverify.go | 35 ++++++++++- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 63 ++++++++++++++++--- 2 files changed, 86 insertions(+), 12 deletions(-) diff --git a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go index 39cd1ea169..04156f2e13 100644 --- a/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go +++ b/pkg/minikube/bootstrapper/bsutil/kverify/kverify.go @@ -30,9 +30,11 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/golang/glog" + "github.com/pkg/errors" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/version" "k8s.io/client-go/kubernetes" kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/minikube/pkg/minikube/bootstrapper" @@ -61,6 +63,7 @@ func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, c if _, ierr := apiServerPID(cr); ierr != nil { return false, nil } + return true, nil }) if err != nil { @@ -180,7 +183,7 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg con } // WaitForHealthyAPIServer waits for api server status to be running -func WaitForHealthyAPIServer(r cruntime.Manager, bs 
bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, ip string, port int, timeout time.Duration) error { +func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, ip string, port int, timeout time.Duration) error { glog.Infof("waiting for apiserver healthz status ...") hStart := time.Now() @@ -208,7 +211,35 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, healthz); err != nil { return fmt.Errorf("apiserver healthz never reported healthy") } - glog.Infof("duration metric: took %s to wait for apiserver healthz status ...", time.Since(hStart)) + + vcheck := func() (bool, error) { + if time.Since(start) > timeout { + return false, fmt.Errorf("cluster wait timed out during version check") + } + if err := APIServerVersionMatch(client, cfg.KubernetesConfig.KubernetesVersion); err != nil { + glog.Warningf("api server version match failed: %v", err) + return false, nil + } + return true, nil + } + + if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, vcheck); err != nil { + return fmt.Errorf("controlPlane never updated to %s", cfg.KubernetesConfig.KubernetesVersion) + } + + glog.Infof("duration metric: took %s to wait for apiserver health ...", time.Since(hStart)) + return nil +} + +func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error { + vi, err := client.ServerVersion() + if err != nil { + return errors.Wrap(err, "server version") + } + glog.Infof("control plane version: %s", vi) + if version.CompareKubeAwareVersionStrings(vi.String(), expected) != 0 { + return fmt.Errorf("controlPane = %q, expected: %q", vi.String(), expected) + } return nil } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go 
b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 4ccc402698..ea2c15f6f2 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -56,6 +56,7 @@ import ( "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/vmpath" "k8s.io/minikube/pkg/util" + "k8s.io/minikube/pkg/util/retry" "k8s.io/minikube/pkg/version" ) @@ -251,6 +252,27 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { return nil } +// unpause unpauses any Kubernetes backplane components +func (k *Bootstrapper) unpause(cfg config.ClusterConfig) error { + + cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c}) + if err != nil { + return err + } + + ids, err := cr.ListContainers(cruntime.ListOptions{State: cruntime.Paused, Namespaces: []string{"kube-system"}}) + if err != nil { + return errors.Wrap(err, "list paused") + } + + if len(ids) > 0 { + if err := cr.UnpauseContainers(ids); err != nil { + return err + } + } + return nil +} + // StartCluster starts the cluster func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { start := time.Now() @@ -259,6 +281,11 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error { glog.Infof("StartCluster complete in %s", time.Since(start)) }() + // Before we start, ensure that no paused components are lurking around + if err := k.unpause(cfg); err != nil { + glog.Warningf("unpause failed: %v", err) + } + if err := bsutil.ExistingConfig(k.c); err == nil { glog.Infof("found existing configuration files, will attempt cluster restart") rerr := k.restartCluster(cfg) @@ -349,23 +376,23 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time return err } - if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, start, ip, port, timeout); err != nil { - return err - } - - c, err := k.client(ip, port) + client, err := k.client(ip, port) if err != nil { return errors.Wrap(err, "get k8s client") } - 
if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, c, start, timeout); err != nil { + if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, start, ip, port, timeout); err != nil { + return err + } + + if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, start, timeout); err != nil { return errors.Wrap(err, "waiting for system pods") } return nil } // needsReset returns whether or not the cluster needs to be reconfigured -func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kubernetes.Clientset) bool { +func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kubernetes.Clientset, version string) bool { if rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil { glog.Infof("needs reset: configs differ:\n%s", rr.Output()) return true @@ -386,6 +413,12 @@ func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kube glog.Infof("needs reset: %v", err) return true } + + if err := kverify.APIServerVersionMatch(client, version); err != nil { + glog.Infof("needs reset: %v", err) + return true + } + return false } @@ -426,7 +459,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { // If the cluster is running, check if we have any work to do. 
conf := bsutil.KubeadmYamlPath - if !k.needsReset(conf, ip, port, client) { + if !k.needsReset(conf, ip, port, client, cfg.KubernetesConfig.KubernetesVersion) { glog.Infof("Taking a shortcut, as the cluster seems to be properly configured") return nil } @@ -466,12 +499,22 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error { return errors.Wrap(err, "apiserver healthz") } + if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, time.Now(), ip, port, kconst.DefaultControlPlaneTimeout); err != nil { + return errors.Wrap(err, "apiserver health") + } + if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil { return errors.Wrap(err, "system pods") } - if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf))); err != nil { - return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command())) + // This can fail during upgrades if the old pods have not shut down yet + addonPhase := func() error { + _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf))) + return err + } + if err = retry.Expo(addonPhase, 1*time.Second, 30*time.Second); err != nil { + glog.Warningf("addon install failed, wil retry: %v", err) + return errors.Wrap(err, "addons") } if err := bsutil.AdjustResourceLimits(k.c); err != nil { From 974d45dfd31722df88f3e3a2ced80f0a4c1f76fd Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 26 Mar 2020 10:01:55 -0700 Subject: [PATCH 58/63] make http error test fatal to avoid nil pointer --- test/integration/functional_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index a5ee68aa73..6623f1f362 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -335,7 +335,7 @@ func validateDashboardCmd(ctx 
context.Context, t *testing.T, profile string) { resp, err := retryablehttp.Get(u.String()) if err != nil { - t.Errorf("failed to http get %q : %v", u.String(), err) + t.Fatalf("failed to http get %q : %v", u.String(), err) } if resp.StatusCode != http.StatusOK { body, err := ioutil.ReadAll(resp.Body) From efbe113a94fb37d880aaee006ee729c7b6ac4896 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 10:13:26 -0700 Subject: [PATCH 59/63] Add more logging to preload --- hack/preload-images/upload.go | 4 ++-- pkg/minikube/download/preload.go | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/hack/preload-images/upload.go b/hack/preload-images/upload.go index a2181294e6..b903f3cc11 100644 --- a/hack/preload-images/upload.go +++ b/hack/preload-images/upload.go @@ -30,13 +30,13 @@ func uploadTarball(tarballFilename string) error { hostPath := path.Join("out/", tarballFilename) gcsDest := fmt.Sprintf("gs://%s", download.PreloadBucket) cmd := exec.Command("gsutil", "cp", hostPath, gcsDest) - if output, err := cmd.Output(); err != nil { + if output, err := cmd.CombinedOutput(); err != nil { return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) } // Make tarball public to all users gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename) cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath) - if output, err := cmd.Output(); err != nil { + if output, err := cmd.CombinedOutput(); err != nil { return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) } return nil diff --git a/pkg/minikube/download/preload.go b/pkg/minikube/download/preload.go index 44b2f81749..dc834fb7f0 100644 --- a/pkg/minikube/download/preload.go +++ b/pkg/minikube/download/preload.go @@ -77,6 +77,7 @@ func remoteTarballURL(k8sVersion string) string { // PreloadExists returns true if there is a preloaded tarball that can be used func PreloadExists(k8sVersion, containerRuntime string) 
bool { + glog.Infof("Checking if preload exists for k8s version %s and runtime %s", k8sVersion, containerRuntime) if !viper.GetBool("preload") { return false } @@ -85,6 +86,7 @@ func PreloadExists(k8sVersion, containerRuntime string) bool { // and https://github.com/kubernetes/minikube/issues/6934 // to track status of adding containerd & crio if containerRuntime != "docker" { + glog.Info("Container runtime isn't docker, skipping preload") return false } From 27685523611e514429d2c26f1c5d93871e1a700b Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 10:23:30 -0700 Subject: [PATCH 60/63] Use download.TarballExists to make sure tarball is publicly accessible in script generator --- hack/preload-images/preload_images.go | 9 +-------- hack/preload-images/upload.go | 7 +++++++ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index 60b6bc9e2a..948918d8f9 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -62,7 +62,7 @@ func main() { for _, kv := range k8sVersions { for _, cr := range containerRuntimes { tf := download.TarballName(kv) - if tarballExists(tf) { + if download.PreloadExists(kv, cr) { fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kv) continue } @@ -77,13 +77,6 @@ func main() { } } -func tarballExists(tarballFilename string) bool { - fmt.Println("Checking if tarball already exists...") - gcsPath := fmt.Sprintf("gs://%s/%s", download.PreloadBucket, tarballFilename) - cmd := exec.Command("gsutil", "stat", gcsPath) - return cmd.Run() == nil -} - func verifyDockerStorage() error { cmd := exec.Command("docker", "info", "-f", "{{.Info.Driver}}") var stderr bytes.Buffer diff --git a/hack/preload-images/upload.go b/hack/preload-images/upload.go index b903f3cc11..f6235a38f4 100644 --- a/hack/preload-images/upload.go +++ b/hack/preload-images/upload.go @@ -37,6 +37,13 @@ 
func uploadTarball(tarballFilename string) error { gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename) cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath) if output, err := cmd.CombinedOutput(); err != nil { + fmt.Printf(`Failed to update ACLs on this tarball in GCS. Please run + +gsutil acl ch -u AllUsers:R %s + +manually to make this link public, or rerun this script to rebuild and reupload the tarball. + + `, gcsPath) return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) } return nil From 2a1ee510837ae5d870275dfa89c8acc4ded28ac2 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 10:31:46 -0700 Subject: [PATCH 61/63] add logging --- hack/preload-images/upload.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hack/preload-images/upload.go b/hack/preload-images/upload.go index f6235a38f4..1a6c02af75 100644 --- a/hack/preload-images/upload.go +++ b/hack/preload-images/upload.go @@ -30,12 +30,14 @@ func uploadTarball(tarballFilename string) error { hostPath := path.Join("out/", tarballFilename) gcsDest := fmt.Sprintf("gs://%s", download.PreloadBucket) cmd := exec.Command("gsutil", "cp", hostPath, gcsDest) + fmt.Printf("Running: %v", cmd.Args) if output, err := cmd.CombinedOutput(); err != nil { return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) } // Make tarball public to all users gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename) cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath) + fmt.Printf("Running: %v", cmd.Args) if output, err := cmd.CombinedOutput(); err != nil { fmt.Printf(`Failed to update ACLs on this tarball in GCS. 
Please run From c9c324242ce6f768cd5ac924c628ec68ca634dd8 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 10:37:29 -0700 Subject: [PATCH 62/63] add newlines --- hack/preload-images/upload.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hack/preload-images/upload.go b/hack/preload-images/upload.go index 1a6c02af75..89bbd8004b 100644 --- a/hack/preload-images/upload.go +++ b/hack/preload-images/upload.go @@ -30,14 +30,14 @@ func uploadTarball(tarballFilename string) error { hostPath := path.Join("out/", tarballFilename) gcsDest := fmt.Sprintf("gs://%s", download.PreloadBucket) cmd := exec.Command("gsutil", "cp", hostPath, gcsDest) - fmt.Printf("Running: %v", cmd.Args) + fmt.Printf("Running: %v\n", cmd.Args) if output, err := cmd.CombinedOutput(); err != nil { return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output)) } // Make tarball public to all users gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename) cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath) - fmt.Printf("Running: %v", cmd.Args) + fmt.Printf("Running: %v\n", cmd.Args) if output, err := cmd.CombinedOutput(); err != nil { fmt.Printf(`Failed to update ACLs on this tarball in GCS. 
Please run From e293b384311b7653922031b5951364150e583033 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Thu, 26 Mar 2020 10:42:45 -0700 Subject: [PATCH 63/63] Set --preload flag to true so that download.PreloadExists doesn't immediately return false --- hack/preload-images/preload_images.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hack/preload-images/preload_images.go b/hack/preload-images/preload_images.go index 948918d8f9..928cd44821 100644 --- a/hack/preload-images/preload_images.go +++ b/hack/preload-images/preload_images.go @@ -23,6 +23,7 @@ import ( "os/exec" "strings" + "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/download" "k8s.io/minikube/pkg/minikube/exit" ) @@ -45,6 +46,7 @@ func init() { if k8sVersion != "" { k8sVersions = append(k8sVersions, k8sVersion) } + viper.Set("preload", "true") } func main() {