pull/7044/head
Priya Wadhwa 2020-03-26 11:01:28 -07:00
commit 721a57c25a
69 changed files with 906 additions and 499 deletions

View File

@ -81,13 +81,27 @@ jobs:
GOPOGH_RESULT: ""
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-16.04
steps:
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Docker Info
shell: bash
run: |
docker info || true
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install lz4
shell: bash
run: |
@ -157,6 +171,12 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
needs: [build_minikube]
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install lz4
shell: bash
run: |
@ -165,9 +185,17 @@ jobs:
- name: Docker Info
shell: bash
run: |
docker info || true
echo "--------------------------"
docker version || true
echo "--------------------------"
docker info || true
echo "--------------------------"
docker system df || true
echo "--------------------------"
docker system info || true
echo "--------------------------"
docker ps || true
echo "--------------------------"
- name: Install gopogh
shell: bash
run: |
@ -232,12 +260,20 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-16.04
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
# conntrack is required for kubernetes 1.18 and higher
- name: Install conntrack
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
- name: Install tools for none
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install conntrack
sudo apt-get -qq -y install socat
- name: Install gopogh
shell: bash
run: |
@ -302,12 +338,20 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
steps:
- name: Install kubectl
shell: bash
run: |
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
# conntrack is required for kubernetes 1.18 and higher
- name: Install conntrack
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
- name: Install tools for none
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install conntrack
sudo apt-get -qq -y install socat
- name: Install gopogh
shell: bash
run: |
@ -372,11 +416,12 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
steps:
- name: Install lz4
- name: Install kubectl
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl
sudo install kubectl /usr/local/bin/kubectl
kubectl version --client=true
- name: Install podman
shell: bash
run: |

View File

@ -625,7 +625,7 @@ release-kvm-driver: install-kvm-driver checksum ## Release KVM Driver
gsutil cp $(GOBIN)/docker-machine-driver-kvm2 gs://minikube/drivers/kvm/$(VERSION)/
gsutil cp $(GOBIN)/docker-machine-driver-kvm2.sha256 gs://minikube/drivers/kvm/$(VERSION)/
site/themes/docsy/assets/vendor/bootstrap/package.js:
site/themes/docsy/assets/vendor/bootstrap/package.js: ## update the website docsy theme git submodule
git submodule update -f --init --recursive
out/hugo/hugo:

View File

@ -200,6 +200,7 @@ func initDriverFlags() {
startCmd.Flags().String("driver", "", fmt.Sprintf("Driver is one of: %v (defaults to auto-detect)", driver.DisplaySupportedDrivers()))
startCmd.Flags().String("vm-driver", "", "DEPRECATED, use `driver` instead.")
startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors")
startCmd.Flags().Bool("vm", false, "Filter to use only VM Drivers")
// kvm2
startCmd.Flags().String(kvmNetwork, "default", "The KVM network name. (kvm2 driver only)")
@ -507,7 +508,7 @@ func selectDriver(existing *config.ClusterConfig) registry.DriverState {
return ds
}
pick, alts := driver.Suggest(driver.Choices())
pick, alts := driver.Suggest(driver.Choices(viper.GetBool("vm")))
if pick.Name == "" {
exit.WithCodeT(exit.Config, "Unable to determine a default driver to use. Try specifying --driver, or see https://minikube.sigs.k8s.io/docs/start/")
}

View File

@ -31,6 +31,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
@ -107,7 +108,7 @@ var statusCmd = &cobra.Command{
for _, n := range cc.Nodes {
glog.Infof("checking status of %s ...", n.Name)
machineName := driver.MachineName(*cc, n)
st, err = status(api, machineName, n.ControlPlane)
st, err = status(api, *cc, n)
glog.Infof("%s status: %+v", machineName, st)
if err != nil {
@ -150,12 +151,12 @@ func exitCode(st *Status) int {
return c
}
func status(api libmachine.API, name string, controlPlane bool) (*Status, error) {
func status(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) {
profile, node := driver.ClusterNameFromMachine(name)
controlPlane := n.ControlPlane
st := &Status{
Name: node,
Name: n.Name,
Host: Nonexistent,
APIServer: Nonexistent,
Kubelet: Nonexistent,
@ -163,6 +164,7 @@ func status(api libmachine.API, name string, controlPlane bool) (*Status, error)
Worker: !controlPlane,
}
name := driver.MachineName(cc, n)
hs, err := machine.Status(api, name)
glog.Infof("%s host status = %q (err=%v)", name, hs, err)
if err != nil {
@ -205,7 +207,7 @@ func status(api libmachine.API, name string, controlPlane bool) (*Status, error)
}
if st.Kubeconfig != Irrelevant {
ok, err := kubeconfig.IsClusterInConfig(ip, profile)
ok, err := kubeconfig.IsClusterInConfig(ip, cc.Name)
glog.Infof("%s is in kubeconfig at ip %s: %v (err=%v)", name, ip, ok, err)
if ok {
st.Kubeconfig = Configured

View File

@ -69,8 +69,9 @@ func runStop(cmd *cobra.Command, args []string) {
func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool {
nonexistent := false
stop := func() (err error) {
machineName := driver.MachineName(cluster, n)
machineName := driver.MachineName(cluster, n)
tryStop := func() (err error) {
err = machine.StopHost(api, machineName)
if err == nil {
return nil
@ -87,7 +88,7 @@ func stop(api libmachine.API, cluster config.ClusterConfig, n config.Node) bool
}
}
if err := retry.Expo(stop, 5*time.Second, 3*time.Minute, 5); err != nil {
if err := retry.Expo(tryStop, 1*time.Second, 30*time.Second, 3); err != nil {
exit.WithError("Unable to stop VM", err)
}

View File

@ -1,4 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_LZ4=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
@ -25,10 +26,10 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
CONFIG_CGROUP_BPF=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_SMP=y
@ -270,12 +271,14 @@ CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_BRIDGE=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_TBF=y
CONFIG_NET_SCH_NETEM=y
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_CLS_U32=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_IPSET=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_BPF=m

2
go.sum
View File

@ -421,8 +421,6 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345 h1:XP1VL9iOZu4yz/rq8zj+yvB23XEY5erXRzp8JYmkWu0=
github.com/johanneswuerbach/nfsexports v0.0.0-20181204082207-1aa528dcb345/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU=
github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f h1:tL0xH80QVHQOde6Qqdohv6PewABH8l8N9pywZtuojJ0=
github.com/johanneswuerbach/nfsexports v0.0.0-20200318065542-c48c3734757f/go.mod h1:+c1/kUpg2zlkoWqTOvzDs36Wpbm3Gd1nlmtXAEB0WGU=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=

View File

@ -53,11 +53,17 @@ sudo systemctl is-active --quiet kubelet \
# conntrack is required for kubernetes 1.18 and higher for none driver
if ! conntrack --version &>/dev/null; then
echo "WARNING: No contrack is not installed"
echo "WARNING: contrack is not installed. will try to install."
sudo apt-get update -qq
sudo apt-get -qq -y install conntrack
fi
# socat is required for kubectl port forward which is used in some tests such as validateHelmTillerAddon
if ! which socat &>/dev/null; then
echo "WARNING: socat is not installed. will try to install."
sudo apt-get update -qq
sudo apt-get -qq -y install socat
fi
mkdir -p cron && gsutil -m rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"

View File

@ -23,6 +23,7 @@ import (
"os/exec"
"strings"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/exit"
)
@ -45,6 +46,7 @@ func init() {
if k8sVersion != "" {
k8sVersions = append(k8sVersions, k8sVersion)
}
viper.Set("preload", "true")
}
func main() {
@ -62,7 +64,7 @@ func main() {
for _, kv := range k8sVersions {
for _, cr := range containerRuntimes {
tf := download.TarballName(kv, cr)
if tarballExists(tf) {
if download.PreloadExists(kv, cr) {
fmt.Printf("A preloaded tarball for k8s version %s already exists, skipping generation.\n", kv)
continue
}
@ -77,13 +79,6 @@ func main() {
}
}
// tarballExists reports whether the named preload tarball is already present
// in the GCS preload bucket, probing for the object with `gsutil stat`.
func tarballExists(tarballFilename string) bool {
	fmt.Println("Checking if tarball already exists...")
	object := fmt.Sprintf("gs://%s/%s", download.PreloadBucket, tarballFilename)
	// gsutil stat exits non-zero when the object is missing.
	return exec.Command("gsutil", "stat", object).Run() == nil
}
func verifyDockerStorage() error {
cmd := exec.Command("docker", "info", "-f", "{{.Info.Driver}}")
var stderr bytes.Buffer

View File

@ -30,13 +30,22 @@ func uploadTarball(tarballFilename string) error {
hostPath := path.Join("out/", tarballFilename)
gcsDest := fmt.Sprintf("gs://%s", download.PreloadBucket)
cmd := exec.Command("gsutil", "cp", hostPath, gcsDest)
if output, err := cmd.Output(); err != nil {
fmt.Printf("Running: %v\n", cmd.Args)
if output, err := cmd.CombinedOutput(); err != nil {
return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output))
}
// Make tarball public to all users
gcsPath := fmt.Sprintf("%s/%s", gcsDest, tarballFilename)
cmd = exec.Command("gsutil", "acl", "ch", "-u", "AllUsers:R", gcsPath)
if output, err := cmd.Output(); err != nil {
fmt.Printf("Running: %v\n", cmd.Args)
if output, err := cmd.CombinedOutput(); err != nil {
fmt.Printf(`Failed to update ACLs on this tarball in GCS. Please run
gsutil acl ch -u AllUsers:R %s
manually to make this link public, or rerun this script to rebuild and reupload the tarball.
`, gcsPath)
return errors.Wrapf(err, "uploading %s to GCS bucket: %v\n%s", hostPath, err, string(output))
}
return nil

View File

@ -34,6 +34,7 @@ import (
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/storageclass"
"k8s.io/minikube/pkg/util/retry"
)
// defaultStorageClassProvisioner is the name of the default storage class provisioner
@ -211,13 +212,17 @@ func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon,
}
command := kubectlCommand(cc, deployFiles, enable)
glog.Infof("Running: %v", command)
rr, err := cmd.RunCmd(command)
if err != nil {
return errors.Wrapf(err, "addon apply")
// Retry, because sometimes we race against an apiserver restart
apply := func() error {
_, err := cmd.RunCmd(command)
if err != nil {
glog.Warningf("apply failed, will retry: %v", err)
}
return err
}
glog.Infof("output:\n%s", rr.Output())
return nil
return retry.Expo(apply, 1*time.Second, time.Second*30)
}
// enableOrDisableStorageClasses enables or disables storage classes

View File

@ -61,8 +61,8 @@ func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manage
if _, ok := extraOpts["node-ip"]; !ok {
extraOpts["node-ip"] = cp.IP
}
nodeName := KubeNodeName(mc, nc)
if nodeName != "" {
if _, ok := extraOpts["hostname-override"]; !ok {
nodeName := KubeNodeName(mc, nc)
extraOpts["hostname-override"] = nodeName
}

View File

@ -81,7 +81,7 @@ Wants=crio.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
[Install]
`,
@ -107,7 +107,7 @@ Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
[Install]
`,
@ -140,7 +140,7 @@ Wants=containerd.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=unix:///run/containerd/containerd.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.200 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
[Install]
`,
@ -167,7 +167,7 @@ Wants=docker.socket
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.18.0-rc.1/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.2 --pod-manifest-path=/etc/kubernetes/manifests
ExecStart=/var/lib/minikube/binaries/v1.18.0/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.2 --pod-manifest-path=/etc/kubernetes/manifests
[Install]
`,

View File

@ -30,9 +30,11 @@ import (
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
kconst "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/minikube/pkg/minikube/bootstrapper"
@ -61,6 +63,7 @@ func WaitForAPIServerProcess(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
if _, ierr := apiServerPID(cr); ierr != nil {
return false, nil
}
return true, nil
})
if err != nil {
@ -180,7 +183,7 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg con
}
// WaitForHealthyAPIServer waits for api server status to be running
func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, start time.Time, ip string, port int, timeout time.Duration) error {
func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, ip string, port int, timeout time.Duration) error {
glog.Infof("waiting for apiserver healthz status ...")
hStart := time.Now()
@ -208,7 +211,35 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, healthz); err != nil {
return fmt.Errorf("apiserver healthz never reported healthy")
}
glog.Infof("duration metric: took %s to wait for apiserver healthz status ...", time.Since(hStart))
vcheck := func() (bool, error) {
if time.Since(start) > timeout {
return false, fmt.Errorf("cluster wait timed out during version check")
}
if err := APIServerVersionMatch(client, cfg.KubernetesConfig.KubernetesVersion); err != nil {
glog.Warningf("api server version match failed: %v", err)
return false, nil
}
return true, nil
}
if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, vcheck); err != nil {
return fmt.Errorf("controlPlane never updated to %s", cfg.KubernetesConfig.KubernetesVersion)
}
glog.Infof("duration metric: took %s to wait for apiserver health ...", time.Since(hStart))
return nil
}
// APIServerVersionMatch returns nil if the running apiserver reports a version
// equal to expected, or an error describing the mismatch otherwise.
func APIServerVersionMatch(client *kubernetes.Clientset, expected string) error {
	vi, err := client.ServerVersion()
	if err != nil {
		return errors.Wrap(err, "server version")
	}
	glog.Infof("control plane version: %s", vi)
	// CompareKubeAwareVersionStrings understands Kubernetes-style versions
	// (including pre-release suffixes); 0 means the two are considered equal.
	if version.CompareKubeAwareVersionStrings(vi.String(), expected) != 0 {
		// fix: error message previously misspelled "controlPane"
		return fmt.Errorf("controlPlane = %q, expected: %q", vi.String(), expected)
	}
	return nil
}

View File

@ -51,10 +51,12 @@ import (
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/kubelet"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/vmpath"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/retry"
"k8s.io/minikube/pkg/version"
)
@ -129,7 +131,7 @@ func (k *Bootstrapper) LogCommands(cfg config.ClusterConfig, o bootstrapper.LogO
dmesg.WriteString(fmt.Sprintf(" | tail -n %d", o.Lines))
}
describeNodes := fmt.Sprintf("sudo %s describe node -A --kubeconfig=%s",
describeNodes := fmt.Sprintf("sudo %s describe nodes --kubeconfig=%s",
path.Join(vmpath.GuestPersistentDir, "binaries", cfg.KubernetesConfig.KubernetesVersion, "kubectl"),
path.Join(vmpath.GuestPersistentDir, "kubeconfig"))
@ -181,20 +183,7 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error {
return nil
}
// StartCluster starts the cluster
func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
err := bsutil.ExistingConfig(k.c)
if err == nil { // if there is an existing cluster don't reconfigure it
return k.restartCluster(cfg)
}
glog.Infof("existence check: %v", err)
start := time.Now()
glog.Infof("StartCluster: %+v", cfg)
defer func() {
glog.Infof("StartCluster complete in %s", time.Since(start))
}()
func (k *Bootstrapper) init(cfg config.ClusterConfig) error {
version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "parsing kubernetes version")
@ -237,10 +226,10 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
}
conf := bsutil.KubeadmYamlPath
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && %s init --config %s %s --ignore-preflight-errors=%s", conf, conf, bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ",")))
rr, err := k.c.RunCmd(c)
if err != nil {
return errors.Wrapf(err, "init failed. output: %q", rr.Output())
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s",
bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ",")))
if _, err := k.c.RunCmd(c); err != nil {
return errors.Wrap(err, "run")
}
if cfg.Driver == driver.Docker {
@ -258,12 +247,75 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
}
if err := k.elevateKubeSystemPrivileges(cfg); err != nil {
glog.Warningf("unable to create cluster role binding, some addons might not work : %v. ", err)
glog.Warningf("unable to create cluster role binding, some addons might not work: %v", err)
}
return nil
}
// unpause unpauses any Kubernetes backplane components
func (k *Bootstrapper) unpause(cfg config.ClusterConfig) error {
	runtime, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
	if err != nil {
		return err
	}

	// Find any kube-system containers left in the paused state.
	paused, err := runtime.ListContainers(cruntime.ListOptions{State: cruntime.Paused, Namespaces: []string{"kube-system"}})
	if err != nil {
		return errors.Wrap(err, "list paused")
	}
	if len(paused) == 0 {
		return nil
	}
	return runtime.UnpauseContainers(paused)
}
// StartCluster starts the cluster
//
// It first tries to restart an existing cluster (when kubeadm configuration
// files are already present); if that fails, it deletes the cluster and falls
// through to a fresh `kubeadm init`, retrying the init once after a delete if
// the first attempt fails.
func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
	start := time.Now()
	glog.Infof("StartCluster: %+v", cfg)
	defer func() {
		glog.Infof("StartCluster complete in %s", time.Since(start))
	}()

	// Before we start, ensure that no paused components are lurking around
	if err := k.unpause(cfg); err != nil {
		// best-effort: a failed unpause is logged, not fatal
		glog.Warningf("unpause failed: %v", err)
	}

	// An existing config means a previous cluster: attempt the cheaper restart path.
	if err := bsutil.ExistingConfig(k.c); err == nil {
		glog.Infof("found existing configuration files, will attempt cluster restart")
		rerr := k.restartCluster(cfg)
		if rerr == nil {
			return nil
		}
		// Restart failed: surface it to the user and wipe the cluster so a
		// clean init can proceed below.
		out.T(out.Embarrassed, "Unable to restart cluster, will reset it: {{.error}}", out.V{"error": rerr})
		if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil {
			glog.Warningf("delete failed: %v", err)
		}
		// Fall-through to init
	}

	// Stage the freshly generated kubeadm config (written as <conf>.new) into place.
	conf := bsutil.KubeadmYamlPath
	if _, err := k.c.RunCmd(exec.Command("sudo", "cp", conf+".new", conf)); err != nil {
		return errors.Wrap(err, "cp")
	}

	err := k.init(cfg)
	if err == nil {
		return nil
	}

	// init failed once: delete whatever partial state it left and try a second time.
	out.T(out.Conflict, "initialization failed, will try again: {{.error}}", out.V{"error": err})
	if err := k.DeleteCluster(cfg.KubernetesConfig); err != nil {
		glog.Warningf("delete failed: %v", err)
	}
	return k.init(cfg)
}
func (k *Bootstrapper) controlPlaneEndpoint(cfg config.ClusterConfig) (string, int, error) {
cp, err := config.PrimaryControlPlane(&cfg)
if err != nil {
@ -324,23 +376,23 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
return err
}
if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, start, ip, port, timeout); err != nil {
return err
}
c, err := k.client(ip, port)
client, err := k.client(ip, port)
if err != nil {
return errors.Wrap(err, "get k8s client")
}
if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, c, start, timeout); err != nil {
if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, start, ip, port, timeout); err != nil {
return err
}
if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, start, timeout); err != nil {
return errors.Wrap(err, "waiting for system pods")
}
return nil
}
// needsReset returns whether or not the cluster needs to be reconfigured
func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kubernetes.Clientset) bool {
func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kubernetes.Clientset, version string) bool {
if rr, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil {
glog.Infof("needs reset: configs differ:\n%s", rr.Output())
return true
@ -361,6 +413,12 @@ func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kube
glog.Infof("needs reset: %v", err)
return true
}
if err := kverify.APIServerVersionMatch(client, version); err != nil {
glog.Infof("needs reset: %v", err)
return true
}
return false
}
@ -401,7 +459,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
// If the cluster is running, check if we have any work to do.
conf := bsutil.KubeadmYamlPath
if !k.needsReset(conf, ip, port, client) {
if !k.needsReset(conf, ip, port, client, cfg.KubernetesConfig.KubernetesVersion) {
glog.Infof("Taking a shortcut, as the cluster seems to be properly configured")
return nil
}
@ -410,8 +468,8 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
return errors.Wrap(err, "clearing stale configs")
}
if _, err := k.c.RunCmd(exec.Command("sudo", "mv", conf+".new", conf)); err != nil {
return errors.Wrap(err, "mv")
if _, err := k.c.RunCmd(exec.Command("sudo", "cp", conf+".new", conf)); err != nil {
return errors.Wrap(err, "cp")
}
baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase)
@ -425,9 +483,9 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
glog.Infof("resetting cluster from %s", conf)
// Run commands one at a time so that it is easier to root cause failures.
for _, c := range cmds {
rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c))
_, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c))
if err != nil {
return errors.Wrapf(err, "running cmd: %s", rr.Command())
return errors.Wrap(err, "run")
}
}
@ -441,12 +499,22 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
return errors.Wrap(err, "apiserver healthz")
}
if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, client, time.Now(), ip, port, kconst.DefaultControlPlaneTimeout); err != nil {
return errors.Wrap(err, "apiserver health")
}
if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
return errors.Wrap(err, "system pods")
}
if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf))); err != nil {
return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command()))
// This can fail during upgrades if the old pods have not shut down yet
addonPhase := func() error {
_, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf)))
return err
}
if err = retry.Expo(addonPhase, 1*time.Second, 30*time.Second); err != nil {
glog.Warningf("addon install failed, wil retry: %v", err)
return errors.Wrap(err, "addons")
}
if err := bsutil.AdjustResourceLimits(k.c); err != nil {
@ -504,11 +572,32 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
cmd = fmt.Sprintf("%s reset", bsutil.InvokeKubeadm(k8s.KubernetesVersion))
}
if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd)); err != nil {
return errors.Wrapf(err, "kubeadm reset: cmd: %q", rr.Command())
rr, derr := k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd))
if derr != nil {
glog.Warningf("%s: %v", rr.Command(), err)
}
return nil
if err := kubelet.ForceStop(k.c); err != nil {
glog.Warningf("stop kubelet: %v", err)
}
cr, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: k.c, Socket: k8s.CRISocket})
if err != nil {
return errors.Wrap(err, "runtime")
}
containers, err := cr.ListContainers(cruntime.ListOptions{Namespaces: []string{"kube-system"}})
if err != nil {
glog.Warningf("unable to list kube-system containers: %v", err)
}
if len(containers) > 0 {
glog.Warningf("found %d kube-system containers to stop", len(containers))
if err := cr.StopContainers(containers); err != nil {
glog.Warningf("error stopping containers: %v", err)
}
}
return derr
}
// SetupCerts sets up certificates within the cluster.
@ -531,7 +620,7 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
}
if err := r.Preload(cfg.KubernetesConfig); err != nil {
return errors.Wrap(err, "preloading")
glog.Infof("prelaoding failed, will try to load cached images: %v", err)
}
if cfg.KubernetesConfig.ShouldLoadCachedImages {
@ -619,7 +708,7 @@ func reloadKubelet(runner command.Runner) error {
return nil
}
startCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && sudo mv %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart kubelet", svc, svc, conf, conf))
startCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cp %s.new %s && sudo cp %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart kubelet", svc, svc, conf, conf))
if _, err := runner.RunCmd(startCmd); err != nil {
return errors.Wrap(err, "starting kubelet")
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package constants
import (
"errors"
"path/filepath"
"k8s.io/client-go/tools/clientcmd"
@ -26,9 +27,9 @@ import (
const (
// DefaultKubernetesVersion is the default kubernetes version
DefaultKubernetesVersion = "v1.18.0-rc.1"
DefaultKubernetesVersion = "v1.18.0"
// NewestKubernetesVersion is the newest Kubernetes version to test against
NewestKubernetesVersion = "v1.18.0-rc.1"
NewestKubernetesVersion = "v1.18.0"
// OldestKubernetesVersion is the oldest Kubernetes version to test against
OldestKubernetesVersion = "v1.11.10"
// DefaultClusterName is the default name for the k8s cluster
@ -100,4 +101,7 @@ var (
"storage-gluster",
"istio-operator",
}
// ErrMachineMissing is returned when virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C)
ErrMachineMissing = errors.New("machine does not exist")
)

View File

@ -78,6 +78,7 @@ func remoteTarballURL(k8sVersion, containerRuntime string) string {
// PreloadExists returns true if there is a preloaded tarball that can be used
func PreloadExists(k8sVersion, containerRuntime string) bool {
glog.Infof("Checking if preload exists for k8s version %s and runtime %s", k8sVersion, containerRuntime)
if !viper.GetBool("preload") {
return false
}
@ -86,6 +87,7 @@ func PreloadExists(k8sVersion, containerRuntime string) bool {
// and https://github.com/kubernetes/minikube/issues/6934
// to track status of adding containerd & crio
if containerRuntime != "docker" {
glog.Info("Container runtime isn't docker, skipping preload")
return false
}

View File

@ -164,8 +164,8 @@ func FlagDefaults(name string) FlagHints {
}
// Choices returns a list of drivers which are possible on this system
func Choices() []registry.DriverState {
options := registry.Available()
func Choices(vm bool) []registry.DriverState {
options := registry.Available(vm)
// Descending priority for predictability and appearance
sort.Slice(options, func(i, j int) bool {
@ -234,13 +234,5 @@ func MachineName(cc config.ClusterConfig, n config.Node) string {
if len(cc.Nodes) == 1 || n.ControlPlane {
return cc.Name
}
return fmt.Sprintf("%s---%s", cc.Name, n.Name)
}
// ClusterNameFromMachine retrieves the cluster name embedded in the machine name
func ClusterNameFromMachine(name string) (string, string) {
if strings.Contains(name, "---") {
return strings.Split(name, "---")[0], strings.Split(name, "---")[1]
}
return name, name
return fmt.Sprintf("%s-%s", cc.Name, n.Name)
}

View File

@ -162,7 +162,7 @@ func TestSuggest(t *testing.T) {
}
}
got := Choices()
got := Choices(false)
gotNames := []string{}
for _, c := range got {
gotNames = append(gotNames, c.Name)

View File

@ -17,11 +17,13 @@ limitations under the License.
package machine
import (
"flag"
"fmt"
"testing"
"time"
// Driver used by testdata
"k8s.io/minikube/pkg/minikube/constants"
_ "k8s.io/minikube/pkg/minikube/registry/drvs/virtualbox"
"github.com/docker/machine/libmachine/drivers"
@ -41,6 +43,11 @@ func createMockDriverHost(c config.ClusterConfig, n config.Node) (interface{}, e
}
func RegisterMockDriver(t *testing.T) {
// Debugging this test is a nightmare.
if err := flag.Lookup("logtostderr").Value.Set("true"); err != nil {
t.Logf("unable to set logtostderr: %v", err)
}
t.Helper()
if !registry.Driver(driver.Mock).Empty() {
return
@ -163,7 +170,7 @@ func TestStartHostErrMachineNotExist(t *testing.T) {
// This should pass with creating host, while machine does not exist.
h, _, err = StartHost(api, mc, n)
if err != nil {
if err != ErrorMachineNotExist {
if err != constants.ErrMachineMissing {
t.Fatalf("Error starting host: %v", err)
}
}

View File

@ -22,11 +22,13 @@ import (
"time"
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/host"
"github.com/docker/machine/libmachine/mcnerror"
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/out"
)
@ -85,11 +87,16 @@ func DeleteHost(api libmachine.API, machineName string) error {
}
out.T(out.DeletingHost, `Deleting "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName})
if err := host.Driver.Remove(); err != nil {
glog.Warningf("remove failed, will retry: %v", err)
time.Sleep(2 * time.Second)
return delete(api, host, machineName)
}
nerr := host.Driver.Remove()
// delete removes a host and its local data files
func delete(api libmachine.API, h *host.Host, machineName string) error {
if err := h.Driver.Remove(); err != nil {
glog.Warningf("remove failed, will retry: %v", err)
time.Sleep(1 * time.Second)
nerr := h.Driver.Remove()
if nerr != nil {
return errors.Wrap(nerr, "host remove retry")
}
@ -100,3 +107,24 @@ func DeleteHost(api libmachine.API, machineName string) error {
}
return nil
}
// demolish destroys a host by any means necessary - use only if state is inconsistent
func demolish(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) {
machineName := driver.MachineName(cc, n)
glog.Infof("DEMOLISHING %s ...", machineName)
// This will probably fail
err := stop(h)
if err != nil {
glog.Infof("stophost failed (probably ok): %v", err)
}
// For 95% of cases, this should be enough
err = DeleteHost(api, machineName)
if err != nil {
glog.Warningf("deletehost failed: %v", err)
}
err = delete(api, h, machineName)
glog.Warningf("delete failed (probably ok) %v", err)
}

View File

@ -47,15 +47,8 @@ const (
maxClockDesyncSeconds = 2.1
)
var (
// ErrorMachineNotExist is returned when virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C)
ErrorMachineNotExist = errors.New("machine does not exist")
)
// fixHost fixes up a previously configured VM so that it is ready to run Kubernetes
func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.Host, error) {
out.T(out.Waiting, "Reconfiguring existing host ...")
start := time.Now()
glog.Infof("fixHost starting: %s", n.Name)
defer func() {
@ -67,21 +60,24 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
}
driverName := h.Driver.DriverName()
// check if need to re-run docker-env
maybeWarnAboutEvalEnv(cc.Driver, cc.Name)
maybeWarnAboutEvalEnv(driverName, cc.Name)
h, err = recreateIfNeeded(api, cc, n, h)
if err != nil {
return h, err
}
// Technically, we should only have to call provision if Docker has changed,
// but who can predict what shape the existing VM is in.
e := engineOptions(cc)
h.HostOptions.EngineOptions.Env = e.Env
err = provisionDockerMachine(h)
if err != nil {
return h, errors.Wrap(err, "provision")
// Avoid reprovisioning "none" driver because provision.Detect requires SSH
if !driver.BareMetal(h.Driver.DriverName()) {
e := engineOptions(cc)
h.HostOptions.EngineOptions.Env = e.Env
err = provisionDockerMachine(h)
if err != nil {
return h, errors.Wrap(err, "provision")
}
}
if driver.IsMock(h.DriverName) {
@ -93,59 +89,63 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
}
if driver.BareMetal(h.Driver.DriverName()) {
glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName())
glog.Infof("%s is local, skipping auth/time setup (requires ssh)", driverName)
return h, nil
}
return h, ensureSyncedGuestClock(h, cc.Driver)
return h, ensureSyncedGuestClock(h, driverName)
}
func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) {
s, err := h.Driver.GetState()
if err != nil || s == state.Stopped || s == state.None {
// If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine
me, err := machineExists(h.Driver.DriverName(), s, err)
if !me {
// If the error is that virtual machine does not exist error, handle error(recreate virtual machine)
if err == ErrorMachineNotExist {
// remove virtual machine
if err := h.Driver.Remove(); err != nil {
// skip returning error since it may be before docker image pulling(so, no host exist)
if h.Driver.DriverName() != driver.Docker {
return nil, errors.Wrap(err, "host remove")
}
}
// remove machine config directory
if err := api.Remove(cc.Name); err != nil {
return nil, errors.Wrap(err, "api remove")
}
// recreate virtual machine
out.T(out.Meh, "machine '{{.name}}' does not exist. Proceeding ahead with recreating VM.", out.V{"name": cc.Name})
h, err = createHost(api, cc, n)
if err != nil {
return nil, errors.Wrap(err, "Error recreating VM")
}
// return ErrMachineNotExist err to initialize preExists flag
return h, ErrorMachineNotExist
}
// If the error is not that virtual machine does not exist error, return error
return nil, errors.Wrap(err, "Error getting state for host")
}
}
machineName := driver.MachineName(cc, n)
machineType := driver.MachineType(cc.Driver)
if s == state.Running {
out.T(out.Running, `Using the running {{.driver_name}} "{{.profile_name}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name, "machine_type": machineType})
} else {
out.T(out.Restarting, `Starting existing {{.driver_name}} {{.machine_type}} for "{{.profile_name}}" ...`, out.V{"driver_name": cc.Driver, "profile_name": cc.Name, "machine_type": machineType})
if err := h.Driver.Start(); err != nil {
return h, errors.Wrap(err, "driver start")
}
if err := api.Save(h); err != nil {
return h, errors.Wrap(err, "save")
recreated := false
s, serr := h.Driver.GetState()
glog.Infof("recreateIfNeeded on %s: state=%s err=%v", machineName, s, serr)
if serr != nil || s == state.Stopped || s == state.None {
// If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine
me, err := machineExists(h.Driver.DriverName(), s, serr)
glog.Infof("exists: %v err=%v", me, err)
glog.Infof("%q vs %q", err, constants.ErrMachineMissing)
if !me || err == constants.ErrMachineMissing {
out.T(out.Shrug, `{{.driver_name}} "{{.cluster}}" {{.machine_type}} is missing, will recreate.`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType})
demolish(api, cc, n, h)
glog.Infof("Sleeping 1 second for extra luck!")
time.Sleep(1 * time.Second)
h, err = createHost(api, cc, n)
if err != nil {
return nil, errors.Wrap(err, "recreate")
}
recreated = true
s, serr = h.Driver.GetState()
}
}
if serr != constants.ErrMachineMissing {
glog.Warningf("unexpected machine state, will restart: %v", serr)
}
if s == state.Running {
if !recreated {
out.T(out.Running, `Updating the running {{.driver_name}} "{{.cluster}}" {{.machine_type}} ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType})
}
return h, nil
}
if !recreated {
out.T(out.Restarting, `Retarting existing {{.driver_name}} {{.machine_type}} for "{{.cluster}}" ...`, out.V{"driver_name": cc.Driver, "cluster": cc.Name, "machine_type": machineType})
}
if err := h.Driver.Start(); err != nil {
return h, errors.Wrap(err, "driver start")
}
if err := api.Save(h); err != nil {
return h, errors.Wrap(err, "save")
}
return h, nil
}
@ -222,7 +222,7 @@ func adjustGuestClock(h hostRunner, t time.Time) error {
func machineExistsState(s state.State, err error) (bool, error) {
if s == state.None {
return false, ErrorMachineNotExist
return false, constants.ErrMachineMissing
}
return true, err
}
@ -231,7 +231,7 @@ func machineExistsError(s state.State, err error, drverr error) (bool, error) {
_ = s // not used
if err == drverr {
// if the error matches driver error
return false, ErrorMachineNotExist
return false, constants.ErrMachineMissing
}
return true, err
}
@ -239,7 +239,7 @@ func machineExistsError(s state.State, err error, drverr error) (bool, error) {
func machineExistsMessage(s state.State, err error, msg string) (bool, error) {
if s == state.None || (err != nil && err.Error() == msg) {
// if the error contains the message
return false, ErrorMachineNotExist
return false, constants.ErrMachineMissing
}
return true, err
}
@ -247,10 +247,10 @@ func machineExistsMessage(s state.State, err error, msg string) (bool, error) {
func machineExistsDocker(s state.State, err error) (bool, error) {
if s == state.Error {
// if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return
return false, ErrorMachineNotExist
return false, constants.ErrMachineMissing
} else if s == state.None {
// if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return
return false, ErrorMachineNotExist
return false, constants.ErrMachineMissing
}
return true, err
}
@ -282,7 +282,7 @@ func machineExists(d string, s state.State, err error) (bool, error) {
return machineExistsDocker(s, err)
case driver.Mock:
if s == state.Error {
return false, ErrorMachineNotExist
return false, constants.ErrMachineMissing
}
return true, err
default:

View File

@ -18,13 +18,14 @@ package machine
import (
"io/ioutil"
"os/exec"
"github.com/docker/machine/libmachine/drivers"
"github.com/docker/machine/libmachine/provision"
"github.com/golang/glog"
"github.com/shirou/gopsutil/cpu"
"github.com/shirou/gopsutil/disk"
"github.com/shirou/gopsutil/mem"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/out"
)
@ -80,18 +81,17 @@ func showLocalOsRelease() {
}
// logRemoteOsRelease shows systemd information about the current linux distribution, on the remote VM
func logRemoteOsRelease(drv drivers.Driver) {
provisioner, err := provision.DetectProvisioner(drv)
func logRemoteOsRelease(r command.Runner) {
rr, err := r.RunCmd(exec.Command("cat", "/etc/os-release"))
if err != nil {
glog.Errorf("DetectProvisioner: %v", err)
glog.Infof("remote release failed: %v", err)
}
osReleaseInfo, err := provision.NewOsRelease(rr.Stdout.Bytes())
if err != nil {
glog.Errorf("NewOsRelease: %v", err)
return
}
osReleaseInfo, err := provisioner.GetOsReleaseInfo()
if err != nil {
glog.Errorf("GetOsReleaseInfo: %v", err)
return
}
glog.Infof("Provisioned with %s", osReleaseInfo.PrettyName)
glog.Infof("Remote host: %s", osReleaseInfo.PrettyName)
}

View File

@ -212,7 +212,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error {
showLocalOsRelease()
}
if driver.IsVM(mc.Driver) {
logRemoteOsRelease(h.Driver)
logRemoteOsRelease(r)
}
return syncLocalAssets(r)
}

View File

@ -17,6 +17,8 @@ limitations under the License.
package machine
import (
"time"
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/host"
"github.com/docker/machine/libmachine/mcnerror"
@ -30,26 +32,36 @@ import (
// StopHost stops the host VM, saving state to disk.
func StopHost(api libmachine.API, machineName string) error {
host, err := api.Load(machineName)
glog.Infof("StopHost: %v", machineName)
h, err := api.Load(machineName)
if err != nil {
return errors.Wrapf(err, "load")
}
out.T(out.Stopping, `Stopping "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": host.DriverName})
if host.DriverName == driver.HyperV {
out.T(out.Stopping, `Stopping "{{.profile_name}}" in {{.driver_name}} ...`, out.V{"profile_name": machineName, "driver_name": h.DriverName})
return stop(h)
}
// stop forcibly stops a host without needing to load
func stop(h *host.Host) error {
start := time.Now()
if h.DriverName == driver.HyperV {
glog.Infof("As there are issues with stopping Hyper-V VMs using API, trying to shut down using SSH")
if err := trySSHPowerOff(host); err != nil {
if err := trySSHPowerOff(h); err != nil {
return errors.Wrap(err, "ssh power off")
}
}
if err := host.Stop(); err != nil {
alreadyInStateError, ok := err.(mcnerror.ErrHostAlreadyInState)
if ok && alreadyInStateError.State == state.Stopped {
if err := h.Stop(); err != nil {
glog.Infof("stop err: %v", err)
st, ok := err.(mcnerror.ErrHostAlreadyInState)
if ok && st.State == state.Stopped {
glog.Infof("host is already stopped")
return nil
}
return &retry.RetriableError{Err: errors.Wrapf(err, "Stop: %s", machineName)}
return &retry.RetriableError{Err: errors.Wrap(err, "stop")}
}
glog.Infof("stop complete within %s", time.Since(start))
return nil
}

View File

@ -60,7 +60,6 @@ var styles = map[StyleEnum]style{
Running: {Prefix: "🏃 "},
Provisioning: {Prefix: "🌱 "},
Restarting: {Prefix: "🔄 "},
Reconfiguring: {Prefix: "📯 "},
Stopping: {Prefix: "✋ "},
Stopped: {Prefix: "🛑 "},
Warning: {Prefix: "❗ ", LowPrefix: lowWarning},
@ -92,7 +91,7 @@ var styles = map[StyleEnum]style{
Caching: {Prefix: "🤹 "},
StartingVM: {Prefix: "🔥 "},
StartingNone: {Prefix: "🤹 "},
Provisioner: {Prefix: " "},
Provisioner: {Prefix: " "},
Resetting: {Prefix: "🔄 "},
DeletingHost: {Prefix: "🔥 "},
Copying: {Prefix: "✨ "},
@ -117,7 +116,7 @@ var styles = map[StyleEnum]style{
Unmount: {Prefix: "🔥 "},
MountOptions: {Prefix: "💾 "},
Fileserver: {Prefix: "🚀 ", OmitNewline: true},
DryRun: {Prefix: "🏜️ "},
DryRun: {Prefix: "🌵 "},
AddonEnable: {Prefix: "🌟 "},
AddonDisable: {Prefix: "🌑 "},
}

View File

@ -32,7 +32,6 @@ const (
Running
Provisioning
Restarting
Reconfiguring
Stopping
Stopped
Warning

View File

@ -24,6 +24,40 @@ import (
"github.com/golang/glog"
)
const (
// Podman is Kubernetes in container using podman driver
Podman = "podman"
// Docker is Kubernetes in container using docker driver
Docker = "docker"
// Mock driver
Mock = "mock"
// None driver
None = "none"
)
// IsKIC checks if the driver is a kubernetes in container
func IsKIC(name string) bool {
return name == Docker || name == Podman
}
// IsMock checks if the driver is a mock
func IsMock(name string) bool {
return name == Mock
}
// IsVM checks if the driver is a VM
func IsVM(name string) bool {
if IsKIC(name) || IsMock(name) || BareMetal(name) {
return false
}
return true
}
// BareMetal returns if this driver is unisolated
func BareMetal(name string) bool {
return name == None || name == Mock
}
var (
// globalRegistry is a globally accessible driver registry
globalRegistry = newRegistry()
@ -59,7 +93,7 @@ func Driver(name string) DriverDef {
}
// Available returns a list of available drivers in the global registry
func Available() []DriverState {
func Available(vm bool) []DriverState {
sts := []DriverState{}
glog.Infof("Querying for installed drivers using PATH=%s", os.Getenv("PATH"))
@ -76,7 +110,13 @@ func Available() []DriverState {
priority = Unhealthy
}
sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s})
if vm {
if IsVM(d.Name) {
sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s})
}
} else {
sts = append(sts, DriverState{Name: d.Name, Priority: priority, State: s})
}
}
// Descending priority for predictability

View File

@ -102,7 +102,7 @@ func TestGlobalAvailable(t *testing.T) {
},
}
if diff := cmp.Diff(Available(), expected); diff != "" {
if diff := cmp.Diff(Available(false), expected); diff != "" {
t.Errorf("available mismatch (-want +got):\n%s", diff)
}
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package tests
import (
"runtime"
"testing"
"github.com/docker/machine/libmachine/drivers"
@ -24,6 +25,7 @@ import (
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/constants"
)
// MockDriver is a struct used to mock out libmachine.Driver
@ -96,11 +98,14 @@ func (d *MockDriver) GetSSHKeyPath() string {
// GetState returns the state of the driver
func (d *MockDriver) GetState() (state.State, error) {
d.Logf("MockDriver.GetState: %v", d.CurrentState)
if d.NotExistError {
_, file, no, _ := runtime.Caller(2)
d.Logf("MockDriver.GetState called from %s#%d: returning %q", file, no, d.CurrentState)
// NOTE: this logic is questionable
if d.NotExistError && d.CurrentState != state.Stopped && d.CurrentState != state.None {
d.CurrentState = state.Error
// don't use cluster.ErrorMachineNotExist to avoid import cycle
return d.CurrentState, errors.New("machine does not exist")
d.Logf("mock NotExistError set, setting state=%s err=%v", d.CurrentState, constants.ErrMachineMissing)
return d.CurrentState, constants.ErrMachineMissing
}
return d.CurrentState, nil
}
@ -123,12 +128,13 @@ func (d *MockDriver) Remove() error {
if d.RemoveError {
return errors.New("error deleting machine")
}
d.NotExistError = false
return nil
}
// Restart restarts the machine
func (d *MockDriver) Restart() error {
d.Logf("MockDriver.Restart")
d.Logf("MockDriver.Restart, setting CurrentState=%s", state.Running)
d.CurrentState = state.Running
return nil
}

View File

@ -29,18 +29,22 @@ import (
"github.com/docker/machine/libmachine/provision/pkgaction"
"github.com/docker/machine/libmachine/swarm"
"github.com/golang/glog"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/util/retry"
)
// BuildrootProvisioner provisions the custom system based on Buildroot
type BuildrootProvisioner struct {
provision.SystemdProvisioner
clusterName string
}
// NewBuildrootProvisioner creates a new BuildrootProvisioner
func NewBuildrootProvisioner(d drivers.Driver) provision.Provisioner {
return &BuildrootProvisioner{
NewSystemdProvisioner("buildroot", d),
viper.GetString(config.ProfileName),
}
}
@ -180,7 +184,7 @@ func (p *BuildrootProvisioner) Provision(swarmOptions swarm.Options, authOptions
}
glog.Infof("setting minikube options for container-runtime")
if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil {
if err := setContainerRuntimeOptions(p.clusterName, p); err != nil {
glog.Infof("Error setting container-runtime options during provisioning %v", err)
return err
}

View File

@ -39,7 +39,6 @@ import (
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/sshutil"
)
@ -209,8 +208,7 @@ func setRemoteAuthOptions(p provision.Provisioner) auth.Options {
}
func setContainerRuntimeOptions(name string, p miniProvisioner) error {
cluster, _ := driver.ClusterNameFromMachine(name)
c, err := config.Load(cluster)
c, err := config.Load(name)
if err != nil {
return errors.Wrap(err, "getting cluster config")
}

View File

@ -29,6 +29,8 @@ import (
"github.com/docker/machine/libmachine/provision/pkgaction"
"github.com/docker/machine/libmachine/swarm"
"github.com/golang/glog"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/util/retry"
)
@ -42,6 +44,7 @@ func NewUbuntuProvisioner(d drivers.Driver) provision.Provisioner {
return &UbuntuProvisioner{
BuildrootProvisioner{
NewSystemdProvisioner("ubuntu", d),
viper.GetString(config.ProfileName),
},
}
}
@ -185,7 +188,7 @@ func (p *UbuntuProvisioner) Provision(swarmOptions swarm.Options, authOptions au
}
glog.Infof("setting minikube options for container-runtime")
if err := setContainerRuntimeOptions(p.Driver.GetMachineName(), p); err != nil {
if err := setContainerRuntimeOptions(p.clusterName, p); err != nil {
glog.Infof("Error setting container-runtime options during provisioning %v", err)
return err
}

View File

@ -112,7 +112,10 @@ github_project_repo = ""
github_subdir = "site"
# Google Custom Search Engine ID. Remove or comment out to disable search.
gcs_engine_id = "005331096405080631692:s7c4yfpw9sy"
# gcs_engine_id = "005331096405080631692:s7c4yfpw9sy"
# enabling local search https://www.docsy.dev/docs/adding-content/navigation/#configure-local-search-with-lunr
offlineSearch = true
# User interface configuration
[params.ui]

View File

@ -83,12 +83,14 @@ A single command away from reproducing your production environment, from the com
{{% /blocks/feature %}}
{{% blocks/feature icon="fa-thumbs-up" title="Cross-platform" %}}
- Bare-metal
- HyperKit
- Hyper-V
- KVM
- Docker
- HyperKit
- Bare-metal
- VirtualBox
- Hyper-V
- VMware
- Podman
{{% /blocks/feature %}}
{{< /blocks/section >}}

View File

@ -62,6 +62,7 @@ If the issue is specific to an operating system, hypervisor, container, addon, o
- `co/kvm2`
- `co/none-driver`
- `co/docker-driver`
- `co/podman-driver`
- `co/virtualbox`
**co/[kubernetes component]** - When the issue appears specific to a k8s component

View File

@ -4,28 +4,18 @@ linkTitle: "docker"
weight: 3
date: 2020-02-05
description: >
Docker driver (EXPERIMENTAL)
Docker driver
---
## Overview
The Docker driver is an experimental VM-free driver that ships with minikube v1.7.
The Docker driver is the newest minikube driver. It runs Kubernetes in a container, VM-free, with full feature parity with minikube in a VM.
{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}}
This driver was inspired by the [kind project](https://kind.sigs.k8s.io/), and uses a modified version of its base image.
## Special features
- Cross platform (linux, macos, windows)
- No hypervisor required when run on Linux.
No hypervisor required when run on Linux.
## Limitations
As an experimental driver, not all commands are supported on all platforms. Notably: `mount,` `service`, `tunnel`, and others. Most of these limitations will be addressed by minikube v1.8 (March 2020)
## Issues
* [Full list of open 'kic-driver' issues](https://github.com/kubernetes/minikube/labels/co%2Fkic-driver)
## Troubleshooting
* Run `minikube start --alsologtostderr -v=1` to debug crashes
* If your docker is too slow on mac os try [Improving docker performance](https://docs.docker.com/docker-for-mac/osxfs-caching/)

View File

@ -7,7 +7,6 @@ date: 2018-08-05
description: >
Microsoft Hyper-V driver
---
## Overview
Hyper-V is a native hypervisor built in to modern versions of Microsoft Windows.

View File

@ -0,0 +1,7 @@
To use the bare-metal driver (none driver), verify that your operating system is Linux and that 'systemd' is installed.
```shell
pidof systemd && echo "yes" || echo "no"
```
If the above command outputs "no", your system is not suitable for the none driver.

View File

@ -0,0 +1,11 @@
To use VM drivers, verify that your system has virtualization support enabled:
```shell
egrep -q 'vmx|svm' /proc/cpuinfo && echo yes || echo no
```
If the above command outputs "no":
- If you are running within a VM, your hypervisor does not allow nested virtualization. You will need to use the *None (bare-metal)* driver
- If you are running on a physical machine, ensure that your BIOS has hardware virtualization enabled

View File

@ -0,0 +1,19 @@
To check if virtualization is supported, run the following command on your Windows terminal or command prompt.
```shell
systeminfo
```
If you see the following output, virtualization is supported:
```shell
Hyper-V Requirements: VM Monitor Mode Extensions: Yes
Virtualization Enabled In Firmware: Yes
Second Level Address Translation: Yes
Data Execution Prevention Available: Yes
```
If you see the following output, your system already has a Hypervisor installed and you can skip the next step.
```shell
Hyper-V Requirements: A hypervisor has been detected.
```

View File

@ -0,0 +1,16 @@
## Install Docker
- [Docker Desktop](https://hub.docker.com/search?q=&type=edition&offering=community&sort=updated_at&order=desc)
## Usage
Start a cluster using the docker driver:
```shell
minikube start --driver=docker
```
To make docker the default driver:
```shell
minikube config set driver docker
```

View File

@ -0,0 +1,21 @@
## experimental
This is an experimental driver. Please use it only for experimental purposes.
For a better Kubernetes-in-container experience, use the docker [driver](https://minikube.sigs.k8s.io/docs/reference/drivers/docker).
## Install Podman
- [Podman](https://podman.io/getting-started/installation.html)
## Usage
Start a cluster using the podman driver:
```shell
minikube start --driver=podman
```
To make podman the default driver:
```shell
minikube config set driver podman
```

View File

@ -8,12 +8,17 @@ description: >
Linux KVM (Kernel-based Virtual Machine) driver
---
## Overview
[KVM (Kernel-based Virtual Machine)](https://www.linux-kvm.org/page/Main_Page) is a full virtualization solution for Linux on x86 hardware containing virtualization extensions. To work with KVM, minikube uses the [libvirt virtualization API](https://libvirt.org/)
{{% readfile file="/docs/Reference/Drivers/includes/kvm2_usage.inc" %}}
## Check virtualization support
{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}}
## Special features
The `minikube start` command supports 3 additional kvm specific flags:

View File

@ -0,0 +1,26 @@
---
title: "podman"
linkTitle: "podman"
weight: 3
date: 2020-03-26
description: >
Podman driver
---
## Overview
The podman driver is another Kubernetes-in-container driver for minikube, similar to the [docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker/) driver.
The podman driver is currently experimental,
and is only supported on Linux and macOS (with a remote podman server).
## Try it with CRI-O container runtime.
```shell
minikube start --driver=podman --container-runtime=cri-o
```
{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}}

View File

@ -4,7 +4,7 @@ linkTitle: "Disk cache"
weight: 6
date: 2019-08-01
description: >
Cache Rules Everything Around Minikube
Cache Rules Everything Around minikube
---
minikube has built-in support for caching downloaded resources into `$MINIKUBE_HOME/cache`. Here are the important file locations:

View File

@ -7,7 +7,7 @@ description: >
About persistent volumes (hostPath)
---
minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath` out of the box. These PersistentVolumes are mapped to a directory inside the running Minikube instance (usually a VM, unless you use `--driver=none`, `--driver=docker`, or `--driver=podman`). For more information on how this works, read the Dynamic Provisioning section below.
minikube supports [PersistentVolumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) of type `hostPath` out of the box. These PersistentVolumes are mapped to a directory inside the running minikube instance (usually a VM, unless you use `--driver=none`, `--driver=docker`, or `--driver=podman`). For more information on how this works, read the Dynamic Provisioning section below.
## A note on mounts, persistence, and minikube hosts

View File

@ -1,4 +1,4 @@
### Getting to know Kubernetes
## Getting to know Kubernetes
Once started, you can use any regular Kubernetes command to interact with your minikube cluster. For example, you can see the pod states by running:
@ -6,16 +6,16 @@ Once started, you can use any regular Kubernetes command to interact with your m
kubectl get po -A
```
### Increasing memory allocation
## Increasing memory allocation
minikube only allocates 2GB of RAM by default, which is only enough for trivial deployments. For larger
minikube auto-selects the memory size based on your system up to 6000mb. For larger
deployments, increase the memory allocation using the `--memory` flag, or make the setting persistent using:
```shell
minikube config set memory 4096
minikube config set memory 8096
```
### Where to go next?
## Where to go next?
Visit the [examples](/docs/examples) page to get an idea of what you can do with minikube.

View File

@ -39,32 +39,39 @@ curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-{{< la
{{% /tab %}}
{{% /tabs %}}
## Hypervisor Setup
Verify that your system has virtualization support enabled:
```shell
egrep -q 'vmx|svm' /proc/cpuinfo && echo yes || echo no
```
If the above command outputs "no":
- If you are running within a VM, your hypervisor does not allow nested virtualization. You will need to use the *None (bare-metal)* driver
- If you are running on a physical machine, ensure that your BIOS has hardware virtualization enabled
## Driver Setup
{{% tabs %}}
{{% tab "Docker" %}}
## Check container support
{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}}
{{% /tab %}}
{{% tab "KVM" %}}
## Check virtualization support
{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}}
{{% readfile file="/docs/Reference/Drivers/includes/kvm2_usage.inc" %}}
{{% /tab %}}
{{% tab "VirtualBox" %}}
## Check virtualization support
{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_linux.inc" %}}
{{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}}
{{% /tab %}}
{{% tab "None (bare-metal)" %}}
## Check baremetal support
{{% readfile file="/docs/Reference/Drivers/includes/check_baremetal_linux.inc" %}}
If you are already running minikube from inside a VM, it is possible to skip the creation of an additional VM layer by using the `none` driver.
{{% readfile file="/docs/Reference/Drivers/includes/none_usage.inc" %}}
{{% /tab %}}
{{% tab "Podman (experimental)" %}}
{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}}
{{% /tab %}}
{{% /tabs %}}
{{% readfile file="/docs/Start/includes/post_install.inc" %}}

View File

@ -50,6 +50,9 @@ brew upgrade minikube
## Hypervisor Setup
{{% tabs %}}
{{% tab "Docker" %}}
{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}}
{{% /tab %}}
{{% tab "Hyperkit" %}}
{{% readfile file="/docs/Reference/Drivers/includes/hyperkit_usage.inc" %}}
{{% /tab %}}
@ -62,6 +65,9 @@ brew upgrade minikube
{{% tab "VMware" %}}
{{% readfile file="/docs/Reference/Drivers/includes/vmware_macos_usage.inc" %}}
{{% /tab %}}
{{% tab "Podman (experimental)" %}}
{{% readfile file="/docs/Reference/Drivers/includes/podman_usage.inc" %}}
{{% /tab %}}
{{% /tabs %}}

View File

@ -7,8 +7,6 @@ weight: 3
### Prerequisites
* Windows 8 or above
* A hypervisor, such as Hyper-V or VirtualBox
* Hardware virtualization support must be enabled in BIOS
* 4GB of RAM
### Installation
@ -30,33 +28,23 @@ After it has installed, close the current CLI session and reopen it. minikube sh
{{% /tab %}}
{{% /tabs %}}
## Hypervisor Setup
To check if virtualization is supported, run the following command on your Windows terminal or command prompt.
```shell
systeminfo
```
If you see the following output, virtualization is supported:
```shell
Hyper-V Requirements: VM Monitor Mode Extensions: Yes
Virtualization Enabled In Firmware: Yes
Second Level Address Translation: Yes
Data Execution Prevention Available: Yes
```
If you see the following output, your system already has a Hypervisor installed and you can skip the next step.
```shell
Hyper-V Requirements: A hypervisor has been detected.
```
{{% tabs %}}
{{% tab "Docker" %}}
{{% readfile file="/docs/Reference/Drivers/includes/docker_usage.inc" %}}
{{% /tab %}}
{{% tab "Hyper-V" %}}
## Check Hypervisor
{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_windows.inc" %}}
{{% readfile file="/docs/Reference/Drivers/includes/hyperv_usage.inc" %}}
{{% /tab %}}
{{% tab "VirtualBox" %}}
## Check Hypervisor
{{% readfile file="/docs/Reference/Drivers/includes/check_virtualization_windows.inc" %}}
{{% readfile file="/docs/Reference/Drivers/includes/virtualbox_usage.inc" %}}
{{% /tab %}}
{{% /tabs %}}

View File

@ -9,13 +9,13 @@ description: >
## Overview
Most continuous integration environments are already running inside a VM, and may not support nested virtualization. The `none` driver was designed for this use case.
Most continuous integration environments are already running inside a VM, and may not support nested virtualization. The `none` driver was designed for this use case. Alternatively, you could use the [Docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker) driver.
## Prerequisites
- VM running a systemd based Linux distribution
## Tutorial
## Using the none driver
Here is an example, that runs minikube from a non-root user, and ensures that the latest stable kubectl is installed:
@ -39,3 +39,7 @@ touch $KUBECONFIG
sudo -E minikube start --driver=none
```
## Alternative ways
Alternatively, you could use minikube's container drivers, such as [Docker](https://minikube.sigs.k8s.io/docs/reference/drivers/docker) or [Podman](https://minikube.sigs.k8s.io/docs/reference/drivers/podman).

View File

@ -98,7 +98,7 @@ to expose GPUs with `--driver=kvm2`. Please don't mix these instructions.
## Why does minikube not support NVIDIA GPUs on macOS?
VM drivers supported by minikube for macOS doesn't support GPU passthrough:
drivers supported by minikube for macOS don't support GPU passthrough:
- [mist64/xhyve#108](https://github.com/mist64/xhyve/issues/108)
- [moby/hyperkit#159](https://github.com/moby/hyperkit/issues/159)

View File

@ -11,7 +11,7 @@ description: >
Most organizations deploy their own Root Certificate and CA service inside the corporate networks.
Internal websites, image repositories and other resources may install SSL server certificates issued by this CA service for security and privacy concerns.
You may install the Root Certificate into the minikube VM to access these corporate resources within the cluster.
You may install the Root Certificate into the minikube cluster to access these corporate resources within the cluster.
## Prerequisites
@ -26,13 +26,13 @@ You may install the Root Certificate into the minikube VM to access these corpor
openssl x509 -inform der -in my_company.cer -out my_company.pem
```
* You may need to delete existing minikube VM
* You may need to delete existing minikube cluster
```shell
minikube delete
```
* Copy the certificate before creating the minikube VM
* Copy the certificate before creating the minikube cluster
```shell
mkdir -p $HOME/.minikube/certs

@ -1 +1 @@
Subproject commit 493bb1a0af92d1242f8396aeb1661dcd3a010db7
Subproject commit 3123298f5b0f56b3315b55319e17a8fa6c9d98f9

View File

@ -67,7 +67,7 @@ func TestDownloadOnly(t *testing.T) {
}
if err != nil {
t.Errorf("%s failed: %v", args, err)
t.Errorf("failed to download only. args: %q %v", args, err)
}
// skip for none, as none driver does not have preload feature.
@ -75,14 +75,14 @@ func TestDownloadOnly(t *testing.T) {
if download.PreloadExists(v, r) {
// Just make sure the tarball path exists
if _, err := os.Stat(download.TarballPath(v, r)); err != nil {
t.Errorf("preloaded tarball path doesn't exist: %v", err)
t.Errorf("failed to verify preloaded tarball file exists: %v", err)
}
return
}
}
imgs, err := images.Kubeadm("", v)
if err != nil {
t.Errorf("kubeadm images: %v %+v", v, err)
t.Errorf("failed to get kubeadm images for %v: %+v", v, err)
}
// skip verify for cache images if --driver=none
@ -129,7 +129,7 @@ func TestDownloadOnly(t *testing.T) {
}
rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to delete all. args: %q : %v", rr.Command(), err)
}
})
// Delete should always succeed, even if previously partially or fully deleted.
@ -139,7 +139,7 @@ func TestDownloadOnly(t *testing.T) {
}
rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to delete. args: %q: %v", rr.Command(), err)
}
})
})
@ -158,24 +158,24 @@ func TestDownloadOnlyKic(t *testing.T) {
args := []string{"start", "--download-only", "-p", profile, "--force", "--alsologtostderr"}
args = append(args, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("%s failed: %v:\n%s", args, err, rr.Output())
if _, err := Run(t, exec.CommandContext(ctx, Target(), args...)); err != nil {
t.Errorf("start with download only failed %q : %v", args, err)
}
// Make sure the downloaded image tarball exists
tarball := download.TarballPath(constants.DefaultKubernetesVersion, cRuntime)
contents, err := ioutil.ReadFile(tarball)
if err != nil {
t.Errorf("reading tarball: %v", err)
t.Errorf("failed to read tarball file %q: %v", tarball, err)
}
// Make sure it has the correct checksum
checksum := md5.Sum(contents)
remoteChecksum, err := ioutil.ReadFile(download.PreloadChecksumPath(constants.DefaultKubernetesVersion, cRuntime))
if err != nil {
t.Errorf("reading checksum file: %v", err)
t.Errorf("failed to read checksum file %q : %v", download.PreloadChecksumPath(constants.DefaultKubernetesVersion, cRuntime), err)
}
if string(remoteChecksum) != string(checksum[:]) {
t.Errorf("checksum of %s does not match remote checksum (%s != %s)", tarball, string(remoteChecksum), string(checksum[:]))
t.Errorf("failed to verify checksum. checksum of %q does not match remote checksum (%q != %q)", tarball, string(remoteChecksum), string(checksum[:]))
}
}

View File

@ -53,7 +53,7 @@ func TestOffline(t *testing.T) {
rr, err := Run(t, c)
if err != nil {
// Fatal so that we may collect logs before stop/delete steps
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
})
}

View File

@ -43,7 +43,7 @@ func TestAddons(t *testing.T) {
args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "-v=1", "--addons=ingress", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller"}, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
// Parallelized tests
@ -69,15 +69,15 @@ func TestAddons(t *testing.T) {
// Assert that disable/enable works offline
rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to stop minikube. args %q : %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to enable dashboard addon: args %q : %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "disable", "dashboard", "-p", profile))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to disable dashboard addon: args %q : %v", rr.Command(), err)
}
}
@ -88,30 +88,30 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
client, err := kapi.Client(profile)
if err != nil {
t.Fatalf("kubernetes client: %v", client)
t.Fatalf("failed to get kubernetes client: %v", client)
}
if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "nginx-ingress-controller", Minutes(6)); err != nil {
t.Errorf("waiting for ingress-controller deployment to stabilize: %v", err)
t.Errorf("failed waiting for ingress-controller deployment to stabilize: %v", err)
}
if _, err := PodWait(ctx, t, profile, "kube-system", "app.kubernetes.io/name=nginx-ingress-controller", Minutes(12)); err != nil {
t.Fatalf("wait: %v", err)
t.Fatalf("failed waititing for nginx-ingress-controller : %v", err)
}
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-ing.yaml")))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to kubectl replace nginx-ing. args %q. %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-pod-svc.yaml")))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to kubectl replace nginx-pod-svc. args %q. %v", rr.Command(), err)
}
if _, err := PodWait(ctx, t, profile, "default", "run=nginx", Minutes(4)); err != nil {
t.Fatalf("wait: %v", err)
t.Fatalf("failed waiting for ngnix pod: %v", err)
}
if err := kapi.WaitForService(client, "default", "nginx", true, time.Millisecond*500, Minutes(10)); err != nil {
t.Errorf("Error waiting for nginx service to be up")
t.Errorf("failed waiting for nginx service to be up: %v", err)
}
want := "Welcome to nginx!"
@ -121,65 +121,65 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
return err
}
if rr.Stderr.String() != "" {
t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr)
t.Logf("%v: unexpected stderr: %s (may be temproary)", rr.Command(), rr.Stderr)
}
if !strings.Contains(rr.Stdout.String(), want) {
return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want)
return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want)
}
return nil
}
if err := retry.Expo(checkIngress, 500*time.Millisecond, Seconds(90)); err != nil {
t.Errorf("ingress never responded as expected on 127.0.0.1:80: %v", err)
t.Errorf("failed to get response from ngninx ingress on 127.0.0.1:80: %v", err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "ingress", "--alsologtostderr", "-v=1"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to disable ingress addon. args %q : %v", rr.Command(), err)
}
}
func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) {
client, err := kapi.Client(profile)
if err != nil {
t.Fatalf("kubernetes client: %v", client)
t.Fatalf("failed to get kubernetes client for %s : %v", profile, err)
}
start := time.Now()
if err := kapi.WaitForRCToStabilize(client, "kube-system", "registry", Minutes(6)); err != nil {
t.Errorf("waiting for registry replicacontroller to stabilize: %v", err)
t.Errorf("failed waiting for registry replicacontroller to stabilize: %v", err)
}
t.Logf("registry stabilized in %s", time.Since(start))
if _, err := PodWait(ctx, t, profile, "kube-system", "actual-registry=true", Minutes(6)); err != nil {
t.Fatalf("wait: %v", err)
t.Fatalf("failed waiting for pod actual-registry: %v", err)
}
if _, err := PodWait(ctx, t, profile, "kube-system", "registry-proxy=true", Minutes(10)); err != nil {
t.Fatalf("wait: %v", err)
t.Fatalf("failed waiting for pod registry-proxy: %v", err)
}
// Test from inside the cluster (no curl available on busybox)
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "po", "-l", "run=registry-test", "--now"))
if err != nil {
t.Logf("pre-cleanup %s failed: %v (not a problem)", rr.Args, err)
t.Logf("pre-cleanup %s failed: %v (not a problem)", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "registry-test", "--restart=Never", "--image=busybox", "-it", "--", "sh", "-c", "wget --spider -S http://registry.kube-system.svc.cluster.local"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to hit registry.kube-system.svc.cluster.local. args %q failed: %v", rr.Command(), err)
}
want := "HTTP/1.1 200"
if !strings.Contains(rr.Stdout.String(), want) {
t.Errorf("curl = %q, want *%s*", rr.Stdout.String(), want)
t.Errorf("expected curl response be %q, but got *%s*", want, rr.Stdout.String())
}
// Test from outside the cluster
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ip"))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed run minikube ip. args %q : %v", rr.Command(), err)
}
if rr.Stderr.String() != "" {
t.Errorf("%s: unexpected stderr: %s", rr.Args, rr.Stderr)
t.Errorf("expected stderr to be -empty- but got: *%q* . args %q", rr.Stderr, rr.Command())
}
endpoint := fmt.Sprintf("http://%s:%d", strings.TrimSpace(rr.Stdout.String()), 5000)
@ -199,30 +199,30 @@ func validateRegistryAddon(ctx context.Context, t *testing.T, profile string) {
return nil
}
if err := retry.Expo(checkExternalAccess, 500*time.Millisecond, Minutes(2)); err != nil {
t.Errorf(err.Error())
if err := retry.Expo(checkExternalAccess, 500*time.Millisecond, Seconds(150)); err != nil {
t.Errorf("failed to check external access to %s: %v", u.String(), err.Error())
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "registry", "--alsologtostderr", "-v=1"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to disable registry addon. args %q: %v", rr.Command(), err)
}
}
func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile string) {
client, err := kapi.Client(profile)
if err != nil {
t.Fatalf("kubernetes client: %v", client)
t.Fatalf("failed to get kubernetes client for %s: %v", profile, err)
}
start := time.Now()
if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "metrics-server", Minutes(6)); err != nil {
t.Errorf("waiting for metrics-server deployment to stabilize: %v", err)
t.Errorf("failed waiting for metrics-server deployment to stabilize: %v", err)
}
t.Logf("metrics-server stabilized in %s", time.Since(start))
if _, err := PodWait(ctx, t, profile, "kube-system", "k8s-app=metrics-server", Minutes(6)); err != nil {
t.Fatalf("wait: %v", err)
t.Fatalf("failed waiting for k8s-app=metrics-server pod: %v", err)
}
want := "CPU(cores)"
@ -232,63 +232,71 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin
return err
}
if rr.Stderr.String() != "" {
t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr)
t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr)
}
if !strings.Contains(rr.Stdout.String(), want) {
return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want)
return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want)
}
return nil
}
// metrics-server takes some time to be able to collect metrics
if err := retry.Expo(checkMetricsServer, time.Second*3, Minutes(6)); err != nil {
t.Errorf(err.Error())
t.Errorf("failed checking metric server: %v", err.Error())
}
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "metrics-server", "--alsologtostderr", "-v=1"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to disable metrics-server addon: args %q: %v", rr.Command(), err)
}
}
func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) {
client, err := kapi.Client(profile)
if err != nil {
t.Fatalf("kubernetes client: %v", client)
t.Fatalf("failed to get kubernetes client for %s: %v", profile, err)
}
start := time.Now()
if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "tiller-deploy", Minutes(6)); err != nil {
t.Errorf("waiting for tiller-deploy deployment to stabilize: %v", err)
t.Errorf("failed waiting for tiller-deploy deployment to stabilize: %v", err)
}
t.Logf("tiller-deploy stabilized in %s", time.Since(start))
if _, err := PodWait(ctx, t, profile, "kube-system", "app=helm", Minutes(6)); err != nil {
t.Fatalf("wait: %v", err)
t.Fatalf("failed waiting for helm pod: %v", err)
}
if NoneDriver() {
_, err := exec.LookPath("socat")
if err != nil {
t.Skipf("socat is required by kubectl to complete this test")
}
}
want := "Server: &version.Version"
// Test from inside the cluster (`helm version` use pod.list permission. we use tiller serviceaccount in kube-system to list pod)
checkHelmTiller := func() error {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "helm-test", "--restart=Never", "--image=alpine/helm:2.16.3", "-it", "--namespace=kube-system", "--serviceaccount=tiller", "--", "version"))
if err != nil {
return err
}
if rr.Stderr.String() != "" {
t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr)
t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr)
}
if !strings.Contains(rr.Stdout.String(), want) {
return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want)
return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want)
}
return nil
}
if err := retry.Expo(checkHelmTiller, 500*time.Millisecond, Minutes(2)); err != nil {
t.Errorf(err.Error())
t.Errorf("failed checking helm tiller: %v", err.Error())
}
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "helm-tiller", "--alsologtostderr", "-v=1"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed disabling helm-tiller addon. arg %q.s %v", rr.Command(), err)
}
}

View File

@ -39,27 +39,27 @@ func TestDockerFlags(t *testing.T) {
args := append([]string{"start", "-p", profile, "--cache-images=false", "--memory=1800", "--install-addons=false", "--wait=false", "--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", "--docker-opt=icc=true", "--alsologtostderr", "-v=5"}, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=Environment --no-pager"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to 'systemctl show docker' inside minikube. args %q: %v", rr.Command(), err)
}
for _, envVar := range []string{"FOO=BAR", "BAZ=BAT"} {
if !strings.Contains(rr.Stdout.String(), envVar) {
t.Errorf("env var %s missing: %s.", envVar, rr.Stdout)
t.Errorf("expected env key/value %q to be passed to minikube's docker and be included in: *%q*.", envVar, rr.Stdout)
}
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo systemctl show docker --property=ExecStart --no-pager"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed on the second 'systemctl show docker' inside minikube. args %q: %v", rr.Command(), err)
}
for _, opt := range []string{"--debug", "--icc=true"} {
if !strings.Contains(rr.Stdout.String(), opt) {
t.Fatalf("%s = %q, want *%s*", rr.Command(), rr.Stdout, opt)
t.Fatalf("expected %q output to have include *%s* . output: %q", rr.Command(), opt, rr.Stdout)
}
}
}

View File

@ -66,10 +66,10 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
defer func() {
if t.Failed() {
t.Logf("%s failed, getting debug info...", t.Name())
t.Logf("%q failed, getting debug info...", t.Name())
rr, err := Run(t, exec.Command(Target(), "-p", profile, "ssh", "mount | grep 9p; ls -la /mount-9p; cat /mount-9p/pod-dates"))
if err != nil {
t.Logf("%s: %v", rr.Command(), err)
t.Logf("debugging command %q failed : %v", rr.Command(), err)
} else {
t.Logf("(debug) %s:\n%s", rr.Command(), rr.Stdout)
}
@ -78,7 +78,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
// Cleanup in advance of future tests
rr, err := Run(t, exec.Command(Target(), "-p", profile, "ssh", "sudo umount -f /mount-9p"))
if err != nil {
t.Logf("%s: %v", rr.Command(), err)
t.Logf("%q: %v", rr.Command(), err)
}
ss.Stop(t)
cancel()
@ -117,7 +117,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
// Assert that we can access the mount without an error. Display for debugging.
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "--", "ls", "-la", guestMount))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed verifying accessing to the mount. args %q : %v", rr.Command(), err)
}
t.Logf("guest mount directory contents\n%s", rr.Stdout)
@ -125,7 +125,7 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
tp := filepath.Join("/mount-9p", testMarker)
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "cat", tp))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed to verify the mount contains unique test marked: args %q: %v", rr.Command(), err)
}
if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) {
@ -136,28 +136,28 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
// Start the "busybox-mount" pod.
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox-mount-test.yaml")))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed to 'kubectl replace' for busybox-mount-test. args %q : %v", rr.Command(), err)
}
if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox-mount", Minutes(4)); err != nil {
t.Fatalf("wait: %v", err)
t.Fatalf("failed waiting for busybox-mount pod: %v", err)
}
// Read the file written by pod startup
p := filepath.Join(tempDir, createdByPod)
got, err := ioutil.ReadFile(p)
if err != nil {
t.Errorf("readfile %s: %v", p, err)
t.Errorf("failed to read file created by pod %q: %v", p, err)
}
wantFromPod := []byte("test\n")
if !bytes.Equal(got, wantFromPod) {
t.Errorf("%s = %q, want %q", p, got, wantFromPod)
t.Errorf("the content of the file %q is %q, but want it to be: *%q*", p, got, wantFromPod)
}
// test that file written from host was read in by the pod via cat /mount-9p/written-by-host;
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "logs", "busybox-mount"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to get kubectl logs for busybox-mount. args %q : %v", rr.Command(), err)
}
if !bytes.Equal(rr.Stdout.Bytes(), wantFromTest) {
t.Errorf("busybox-mount logs = %q, want %q", rr.Stdout.Bytes(), wantFromTest)
@ -169,27 +169,27 @@ func validateMountCmd(ctx context.Context, t *testing.T, profile string) {
// test that file written from host was read in by the pod via cat /mount-9p/fromhost;
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "stat", gp))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to stat the file %q iniside minikube : args %q: %v", gp, rr.Command(), err)
}
if runtime.GOOS == "windows" {
if strings.Contains(rr.Stdout.String(), "Access: 1970-01-01") {
t.Errorf("invalid access time: %v", rr.Stdout)
t.Errorf("expected to get valid access time but got: %q", rr.Stdout)
}
}
if strings.Contains(rr.Stdout.String(), "Modify: 1970-01-01") {
t.Errorf("invalid modify time: %v", rr.Stdout)
t.Errorf("expected to get valid modify time but got: %q", rr.Stdout)
}
}
p = filepath.Join(tempDir, createdByTestRemovedByPod)
if _, err := os.Stat(p); err == nil {
t.Errorf("expected file %s to be removed", p)
t.Errorf("expected file %q to be removed but exists !", p)
}
p = filepath.Join(tempDir, createdByPodRemovedByTest)
if err := os.Remove(p); err != nil {
t.Errorf("unexpected error removing file %s: %v", p, err)
t.Errorf("failed to remove file %q: %v", p, err)
}
}

View File

@ -38,7 +38,7 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st
defer cancel()
if _, err := PodWait(ctx, t, profile, "kube-system", "integration-test=storage-provisioner", Minutes(4)); err != nil {
t.Fatalf("wait: %v", err)
t.Fatalf("failed waiting for storage-provisioner: %v", err)
}
checkStorageClass := func() error {
@ -58,13 +58,13 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st
// Ensure the addon-manager has created the StorageClass before creating a claim, otherwise it won't be bound
if err := retry.Expo(checkStorageClass, time.Millisecond*500, Seconds(100)); err != nil {
t.Errorf("no default storage class after retry: %v", err)
t.Errorf("failed to check for storage class: %v", err)
}
// Now create a testpvc
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "pvc.yaml")))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Command(), err)
}
checkStoragePhase := func() error {
@ -84,6 +84,6 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile st
}
if err := retry.Expo(checkStoragePhase, 2*time.Second, Minutes(4)); err != nil {
t.Fatalf("PV Creation failed with error: %v", err)
t.Fatalf("failed to check storage phase: %v", err)
}
}

View File

@ -50,7 +50,7 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) {
client, err := kapi.Client(profile)
if err != nil {
t.Fatalf("client: %v", err)
t.Fatalf("failed to get kubernetes client for %q: %v", profile, err)
}
// Pre-Cleanup
@ -62,14 +62,14 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) {
args := []string{"-p", profile, "tunnel", "--alsologtostderr", "-v=1"}
ss, err := Start(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("%s failed: %v", args, err)
t.Errorf("failed to start a tunnel: args %q: %v", args, err)
}
defer ss.Stop(t)
// Start the "nginx" pod.
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "testsvc.yaml")))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
if _, err := PodWait(ctx, t, profile, "default", "run=nginx-svc", Minutes(4)); err != nil {
t.Fatalf("wait: %v", err)
@ -97,9 +97,9 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "svc", "nginx-svc"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("%s failed: %v", rr.Command(), err)
}
t.Logf("kubectl get svc nginx-svc:\n%s", rr.Stdout)
t.Logf("failed to kubectl get svc nginx-svc:\n%s", rr.Stdout)
}
got := []byte{}
@ -120,11 +120,11 @@ func validateTunnelCmd(ctx context.Context, t *testing.T, profile string) {
return nil
}
if err = retry.Expo(fetch, time.Millisecond*500, Minutes(2), 13); err != nil {
t.Errorf("failed to contact nginx at %s: %v", nginxIP, err)
t.Errorf("failed to hit nginx at %q: %v", nginxIP, err)
}
want := "Welcome to nginx!"
if !strings.Contains(string(got), want) {
t.Errorf("body = %q, want *%s*", got, want)
t.Errorf("expected body to contain %q, but got *%q*", want, got)
}
}

View File

@ -63,11 +63,11 @@ func TestFunctional(t *testing.T) {
}
p := localSyncTestPath()
if err := os.Remove(p); err != nil {
t.Logf("unable to remove %s: %v", p, err)
t.Logf("unable to remove %q: %v", p, err)
}
p = localTestCertPath()
if err := os.Remove(p); err != nil {
t.Logf("unable to remove %s: %v", p, err)
t.Logf("unable to remove %q: %v", p, err)
}
CleanupWithLogs(t, profile, cancel)
}()
@ -137,7 +137,7 @@ func TestFunctional(t *testing.T) {
func validateNodeLabels(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "nodes", "--output=go-template", "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to 'kubectl get nodes' with args %q: %v", rr.Command(), err)
}
expectedLabels := []string{"minikube.k8s.io/commit", "minikube.k8s.io/version", "minikube.k8s.io/updated_at", "minikube.k8s.io/name"}
for _, el := range expectedLabels {
@ -155,10 +155,10 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) {
c := exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && "+Target()+" status -p "+profile)
rr, err := Run(t, c)
if err != nil {
t.Fatalf("Failed to do minikube status after eval-ing docker-env %s", err)
t.Fatalf("failed to do minikube status after eval-ing docker-env %s", err)
}
if !strings.Contains(rr.Output(), "Running") {
t.Fatalf("Expected status output to include 'Running' after eval docker-env but got \n%s", rr.Output())
t.Fatalf("expected status output to include 'Running' after eval docker-env but got: *%q*", rr.Output())
}
mctx, cancel = context.WithTimeout(ctx, Seconds(13))
@ -167,12 +167,12 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) {
c = exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && docker images")
rr, err = Run(t, c)
if err != nil {
t.Fatalf("Failed to test eval docker-evn %s", err)
t.Fatalf("failed to run minikube docker-env. args %q : %v ", rr.Command(), err)
}
expectedImgInside := "gcr.io/k8s-minikube/storage-provisioner"
if !strings.Contains(rr.Output(), expectedImgInside) {
t.Fatalf("Expected 'docker ps' to have %q from docker-daemon inside minikube. the docker ps output is:\n%q\n", expectedImgInside, rr.Output())
t.Fatalf("expected 'docker images' to have %q inside minikube. but the output is: *%q*", expectedImgInside, rr.Output())
}
}
@ -180,11 +180,11 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) {
func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
srv, err := startHTTPProxy(t)
if err != nil {
t.Fatalf("Failed to set up the test proxy: %s", err)
t.Fatalf("failed to set up the test proxy: %s", err)
}
// Use more memory so that we may reliably fit MySQL and nginx
startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory", "2500MB"}, StartArgs()...)
startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...)
c := exec.CommandContext(ctx, Target(), startArgs...)
env := os.Environ()
env = append(env, fmt.Sprintf("HTTP_PROXY=%s", srv.Addr))
@ -192,7 +192,7 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
c.Env = env
rr, err := Run(t, c)
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed minikube start. args %q: %v", rr.Command(), err)
}
want := "Found network options:"
@ -210,10 +210,10 @@ func validateStartWithProxy(ctx context.Context, t *testing.T, profile string) {
func validateKubeContext(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "config", "current-context"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to get current-context. args %q : %v", rr.Command(), err)
}
if !strings.Contains(rr.Stdout.String(), profile) {
t.Errorf("current-context = %q, want %q", rr.Stdout.String(), profile)
t.Errorf("expected current-context = %q, but got *%q*", profile, rr.Stdout.String())
}
}
@ -221,22 +221,23 @@ func validateKubeContext(ctx context.Context, t *testing.T, profile string) {
func validateKubectlGetPods(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "po", "-A"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to get kubectl pods: args %q : %v", rr.Command(), err)
}
if rr.Stderr.String() != "" {
t.Errorf("%s: got unexpected stderr: %s", rr.Command(), rr.Stderr)
t.Errorf("expected stderr to be empty but got *%q*: args %q", rr.Stderr, rr.Command())
}
if !strings.Contains(rr.Stdout.String(), "kube-system") {
t.Errorf("%s = %q, want *kube-system*", rr.Command(), rr.Stdout)
t.Errorf("expected stdout to include *kube-system* but got *%q*. args: %q", rr.Stdout, rr.Command())
}
}
// validateMinikubeKubectl validates that the `minikube kubectl` command returns content
func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string) {
kubectlArgs := []string{"kubectl", "--", "get", "pods"}
// Must set the profile so that it knows what version of Kubernetes to use
kubectlArgs := []string{"-p", profile, "kubectl", "--", "--context", profile, "get", "pods"}
rr, err := Run(t, exec.CommandContext(ctx, Target(), kubectlArgs...))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed to get pods. args %q: %v", rr.Command(), err)
}
}
@ -244,12 +245,12 @@ func validateMinikubeKubectl(ctx context.Context, t *testing.T, profile string)
func validateComponentHealth(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "cs", "-o=json"))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed to get components. args %q: %v", rr.Command(), err)
}
cs := api.ComponentStatusList{}
d := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes()))
if err := d.Decode(&cs); err != nil {
t.Fatalf("decode: %v", err)
t.Fatalf("failed to decode kubectl json output: args %q : %v", rr.Command(), err)
}
for _, i := range cs.Items {
@ -269,40 +270,41 @@ func validateComponentHealth(ctx context.Context, t *testing.T, profile string)
func validateStatusCmd(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to run minikube status. args %q : %v", rr.Command(), err)
}
// Custom format
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-f", "host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}}"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to run minikube status with custom format: args %q: %v", rr.Command(), err)
}
match, _ := regexp.MatchString(`host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)`, rr.Stdout.String())
re := `host:([A-z]+),kublet:([A-z]+),apiserver:([A-z]+),kubeconfig:([A-z]+)`
match, _ := regexp.MatchString(re, rr.Stdout.String())
if !match {
t.Errorf("%s failed: %v. Output for custom format did not match", rr.Args, err)
t.Errorf("failed to match regex %q for minikube status with custom format. args %q. output %q", re, rr.Command(), rr.Output())
}
// Json output
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "-o", "json"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to run minikube status with json output. args %q : %v", rr.Command(), err)
}
var jsonObject map[string]interface{}
err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to decode json from minikube status. args %q. %v", rr.Command(), err)
}
if _, ok := jsonObject["Host"]; !ok {
t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Host")
t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Host")
}
if _, ok := jsonObject["Kubelet"]; !ok {
t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Kubelet")
t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Kubelet")
}
if _, ok := jsonObject["APIServer"]; !ok {
t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "APIServer")
t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "APIServer")
}
if _, ok := jsonObject["Kubeconfig"]; !ok {
t.Errorf("%s failed: %v. Missing key %s in json object", rr.Args, err, "Kubeconfig")
t.Errorf("%q failed: %v. Missing key %s in json object", rr.Command(), err, "Kubeconfig")
}
}
@ -311,7 +313,7 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) {
args := []string{"dashboard", "--url", "-p", profile, "--alsologtostderr", "-v=1"}
ss, err := Start(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("%s failed: %v", args, err)
t.Errorf("failed to run minikube dashboard. args %q : %v", args, err)
}
defer func() {
ss.Stop(t)
@ -333,12 +335,12 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) {
resp, err := retryablehttp.Get(u.String())
if err != nil {
t.Errorf("failed get: %v", err)
t.Fatalf("failed to http get %q : %v", u.String(), err)
}
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("Unable to read http response body: %v", err)
t.Errorf("failed to read http response body from dashboard %q: %v", u.String(), err)
}
t.Errorf("%s returned status code %d, expected %d.\nbody:\n%s", u, resp.StatusCode, http.StatusOK, body)
}
@ -348,12 +350,12 @@ func validateDashboardCmd(ctx context.Context, t *testing.T, profile string) {
func validateDNS(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "busybox.yaml")))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed to kubectl replace busybox : args %q: %v", rr.Command(), err)
}
names, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4))
if err != nil {
t.Fatalf("wait: %v", err)
t.Fatalf("failed waiting for busybox pod : %v", err)
}
nslookup := func() error {
@ -363,12 +365,12 @@ func validateDNS(ctx context.Context, t *testing.T, profile string) {
// If the coredns process was stable, this retry wouldn't be necessary.
if err = retry.Expo(nslookup, 1*time.Second, Minutes(1)); err != nil {
t.Errorf("nslookup failing: %v", err)
t.Errorf("failed to do nslookup on kubernetes.default: %v", err)
}
want := []byte("10.96.0.1")
if !bytes.Contains(rr.Stdout.Bytes(), want) {
t.Errorf("nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want)
t.Errorf("failed nslookup: got=%q, want=*%q*", rr.Stdout.Bytes(), want)
}
}
@ -406,29 +408,29 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
t.Run("cache", func(t *testing.T) {
t.Run("add", func(t *testing.T) {
for _, img := range []string{"busybox:latest", "busybox:1.28.4-glibc", "k8s.gcr.io/pause:latest"} {
_, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img))
if err != nil {
t.Errorf("Failed to cache image %q", img)
t.Errorf("failed to cache add image %q. args %q err %v", img, rr.Command(), err)
}
}
})
t.Run("delete_busybox:1.28.4-glibc", func(t *testing.T) {
_, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc"))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc"))
if err != nil {
t.Errorf("failed to delete image busybox:1.28.4-glibc from cache: %v", err)
t.Errorf("failed to delete image busybox:1.28.4-glibc from cache. args %q: %v", rr.Command(), err)
}
})
t.Run("list", func(t *testing.T) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "list"))
if err != nil {
t.Errorf("cache list failed: %v", err)
t.Errorf("failed to do cache list. args %q: %v", rr.Command(), err)
}
if !strings.Contains(rr.Output(), "k8s.gcr.io/pause") {
t.Errorf("cache list did not include k8s.gcr.io/pause")
t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got:\n ***%q***", rr.Output())
}
if strings.Contains(rr.Output(), "busybox:1.28.4-glibc") {
t.Errorf("cache list should not include busybox:1.28.4-glibc")
t.Errorf("expected 'cache list' output not to include busybox:1.28.4-glibc but got:\n ***%q***", rr.Output())
}
})
@ -438,7 +440,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
t.Errorf("failed to get images by %q ssh %v", rr.Command(), err)
}
if !strings.Contains(rr.Output(), "1.28.4-glibc") {
t.Errorf("expected '1.28.4-glibc' to be in the output: %s", rr.Output())
t.Errorf("expected '1.28.4-glibc' to be in the output but got %q", rr.Output())
}
})
@ -453,17 +455,17 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
// make sure the image is deleted.
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img))
if err == nil {
t.Errorf("expected the image be deleted and get error but got nil error ! cmd: %q", rr.Command())
t.Errorf("expected an error. because image should not exist. but got *nil error* ! cmd: %q", rr.Command())
}
// minikube cache reload.
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "reload"))
if err != nil {
t.Errorf("expected %q to run successfully but got error %v", rr.Command(), err)
t.Errorf("expected %q to run successfully but got error: %v", rr.Command(), err)
}
// make sure 'cache reload' brought back the manually deleted image.
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img))
if err != nil {
t.Errorf("expected to get no error for %q but got %v", rr.Command(), err)
t.Errorf("expected %q to run successfully but got error: %v", rr.Command(), err)
}
})
@ -489,16 +491,16 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) {
args := append([]string{"-p", profile, "config"}, tc.args...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil && tc.wantErr == "" {
t.Errorf("unexpected failure: %s failed: %v", rr.Args, err)
t.Errorf("failed to config minikube. args %q : %v", rr.Command(), err)
}
got := strings.TrimSpace(rr.Stdout.String())
if got != tc.wantOut {
t.Errorf("%s stdout got: %q, want: %q", rr.Command(), got, tc.wantOut)
t.Errorf("expected config output for %q to be -%q- but got *%q*", rr.Command(), tc.wantOut, got)
}
got = strings.TrimSpace(rr.Stderr.String())
if got != tc.wantErr {
t.Errorf("%s stderr got: %q, want: %q", rr.Command(), got, tc.wantErr)
t.Errorf("expected config error for %q to be -%q- but got *%q*", rr.Command(), tc.wantErr, got)
}
}
}
@ -507,11 +509,11 @@ func validateConfigCmd(ctx context.Context, t *testing.T, profile string) {
func validateLogsCmd(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "logs"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("%s failed: %v", rr.Command(), err)
}
for _, word := range []string{"Docker", "apiserver", "Linux", "kubelet"} {
if !strings.Contains(rr.Stdout.String(), word) {
t.Errorf("minikube logs missing expected word: %q", word)
t.Errorf("expected minikube logs to include word: -%q- but got \n***%q***\n", word, rr.Output())
}
}
}
@ -523,16 +525,16 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
nonexistentProfile := "lis"
rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", nonexistentProfile))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("%s failed: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("%s failed: %v", rr.Command(), err)
}
var profileJSON map[string][]map[string]interface{}
err = json.Unmarshal(rr.Stdout.Bytes(), &profileJSON)
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("%s failed: %v", rr.Command(), err)
}
for profileK := range profileJSON {
for _, p := range profileJSON[profileK] {
@ -548,7 +550,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
// List profiles
rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to list profiles: args %q : %v", rr.Command(), err)
}
// Table output
@ -562,21 +564,20 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
}
}
if !profileExists {
t.Errorf("%s failed: Missing profile '%s'. Got '\n%s\n'", rr.Args, profile, rr.Stdout.String())
t.Errorf("expected 'profile list' output to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Command())
}
})
t.Run("profile_json_output", func(t *testing.T) {
// Json output
rr, err := Run(t, exec.CommandContext(ctx, Target(), "profile", "list", "--output", "json"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to list profiles with json format. args %q: %v", rr.Command(), err)
}
var jsonObject map[string][]map[string]interface{}
err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to decode json from profile list: args %q: %v", rr.Command(), err)
}
validProfiles := jsonObject["valid"]
profileExists := false
@ -587,7 +588,7 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
}
}
if !profileExists {
t.Errorf("%s failed: Missing profile '%s'. Got '\n%s\n'", rr.Args, profile, rr.Stdout.String())
t.Errorf("expected the json of 'profile list' to include %q but got *%q*. args: %q", profile, rr.Stdout.String(), rr.Command())
}
})
@ -597,56 +598,56 @@ func validateProfileCmd(ctx context.Context, t *testing.T, profile string) {
func validateServiceCmd(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=gcr.io/hello-minikube-zero-install/hello-node"))
if err != nil {
t.Logf("%s failed: %v (may not be an error)", rr.Args, err)
t.Logf("%q failed: %v (may not be an error).", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "expose", "deployment", "hello-node", "--type=NodePort", "--port=8080"))
if err != nil {
t.Logf("%s failed: %v (may not be an error)", rr.Args, err)
t.Logf("%q failed: %v (may not be an error)", rr.Command(), err)
}
if _, err := PodWait(ctx, t, profile, "default", "app=hello-node", Minutes(10)); err != nil {
t.Fatalf("wait: %v", err)
t.Fatalf("failed waiting for hello-node pod: %v", err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "list"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to do service list. args %q : %v", rr.Command(), err)
}
if !strings.Contains(rr.Stdout.String(), "hello-node") {
t.Errorf("service list got %q, wanted *hello-node*", rr.Stdout.String())
t.Errorf("expected 'service list' to contain *hello-node* but got -%q-", rr.Stdout.String())
}
// Test --https --url mode
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "--namespace=default", "--https", "--url", "hello-node"))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed to get service url. args %q : %v", rr.Command(), err)
}
if rr.Stderr.String() != "" {
t.Errorf("unexpected stderr output: %s", rr.Stderr)
t.Errorf("expected stderr to be empty but got *%q*", rr.Stderr)
}
endpoint := strings.TrimSpace(rr.Stdout.String())
u, err := url.Parse(endpoint)
if err != nil {
t.Fatalf("failed to parse %q: %v", endpoint, err)
t.Fatalf("failed to parse service url endpoint %q: %v", endpoint, err)
}
if u.Scheme != "https" {
t.Errorf("got scheme: %q, expected: %q", u.Scheme, "https")
t.Errorf("expected scheme to be 'https' but got %q", u.Scheme)
}
// Test --format=IP
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url", "--format={{.IP}}"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to get service url with custom format. args %q: %v", rr.Command(), err)
}
if strings.TrimSpace(rr.Stdout.String()) != u.Hostname() {
t.Errorf("%s = %q, wanted %q", rr.Args, rr.Stdout.String(), u.Hostname())
t.Errorf("expected 'service --format={{.IP}}' output to be -%q- but got *%q* . args %q.", u.Hostname(), rr.Stdout.String(), rr.Command())
}
// Test a regular URL
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "service", "hello-node", "--url"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to get service url. args: %q: %v", rr.Command(), err)
}
endpoint = strings.TrimSpace(rr.Stdout.String())
@ -655,7 +656,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) {
t.Fatalf("failed to parse %q: %v", endpoint, err)
}
if u.Scheme != "http" {
t.Fatalf("got scheme: %q, expected: %q", u.Scheme, "http")
t.Fatalf("expected scheme to be -%q- got scheme: *%q*", "http", u.Scheme)
}
t.Logf("url: %s", endpoint)
@ -664,7 +665,7 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) {
t.Fatalf("get failed: %v\nresp: %v", err, resp)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("%s = status code %d, want %d", u, resp.StatusCode, http.StatusOK)
t.Fatalf("expected status code for %q to be -%q- but got *%q*", endpoint, http.StatusOK, resp.StatusCode)
}
}
@ -673,23 +674,23 @@ func validateAddonsCmd(ctx context.Context, t *testing.T, profile string) {
// Table output
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to do addon list: args %q : %v", rr.Command(), err)
}
for _, a := range []string{"dashboard", "ingress", "ingress-dns"} {
if !strings.Contains(rr.Output(), a) {
t.Errorf("addon list expected to include %q but didn't output: %q", a, rr.Output())
t.Errorf("expected 'addon list' output to include -%q- but got *%q*", a, rr.Output())
}
}
// Json output
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "list", "-o", "json"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to do addon list with json output. args %q: %v", rr.Command(), err)
}
var jsonObject map[string]interface{}
err = json.Unmarshal(rr.Stdout.Bytes(), &jsonObject)
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to decode addon list json output : %v", err)
}
}
@ -701,10 +702,10 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) {
want := "hello\n"
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("echo hello")))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to run an ssh command. args %q : %v", rr.Command(), err)
}
if rr.Stdout.String() != want {
t.Errorf("%v = %q, want = %q", rr.Args, rr.Stdout.String(), want)
t.Errorf("expected minikube ssh command output to be -%q- but got *%q*. args %q", want, rr.Stdout.String(), rr.Command())
}
}
@ -712,12 +713,12 @@ func validateSSHCmd(ctx context.Context, t *testing.T, profile string) {
func validateMySQL(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "mysql.yaml")))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed to kubectl replace mysql: args %q failed: %v", rr.Command(), err)
}
names, err := PodWait(ctx, t, profile, "default", "app=mysql", Minutes(10))
if err != nil {
t.Fatalf("podwait: %v", err)
t.Fatalf("failed waiting for mysql pod: %v", err)
}
// Retry, as mysqld first comes up without users configured. Scan for names in case of a reschedule.
@ -725,8 +726,8 @@ func validateMySQL(ctx context.Context, t *testing.T, profile string) {
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", names[0], "--", "mysql", "-ppassword", "-e", "show databases;"))
return err
}
if err = retry.Expo(mysql, 2*time.Second, Seconds(180)); err != nil {
t.Errorf("mysql failing: %v", err)
if err = retry.Expo(mysql, 1*time.Second, Seconds(200)); err != nil {
t.Errorf("failed to exec 'mysql -ppassword -e show databases;': %v", err)
}
}
@ -756,12 +757,12 @@ func setupFileSync(ctx context.Context, t *testing.T, profile string) {
t.Logf("local sync path: %s", p)
err := copy.Copy("./testdata/sync.test", p)
if err != nil {
t.Fatalf("copy: %v", err)
t.Fatalf("failed to copy ./testdata/sync.test : %v", err)
}
err = copy.Copy("./testdata/minikube_test.pem", localTestCertPath())
if err != nil {
t.Fatalf("copy: %v", err)
t.Fatalf("failed to copy ./testdata/minikube_test.pem : %v", err)
}
}
@ -775,14 +776,14 @@ func validateFileSync(ctx context.Context, t *testing.T, profile string) {
t.Logf("Checking for existence of %s within VM", vp)
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp)))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("%s failed: %v", rr.Command(), err)
}
got := rr.Stdout.String()
t.Logf("file sync test content: %s", got)
expected, err := ioutil.ReadFile("./testdata/sync.test")
if err != nil {
t.Errorf("test file not found: %v", err)
t.Errorf("failed to read test file '/testdata/sync.test' : %v", err)
}
if diff := cmp.Diff(string(expected), got); diff != "" {
@ -812,13 +813,13 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) {
t.Logf("Checking for existence of %s within VM", vp)
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat %s", vp)))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to check existence of %q inside minikube. args %q: %v", vp, rr.Command(), err)
}
// Strip carriage returned by ssh
got := strings.Replace(rr.Stdout.String(), "\r", "", -1)
if diff := cmp.Diff(string(want), got); diff != "" {
t.Errorf("minikube_test.pem -> %s mismatch (-want +got):\n%s", vp, diff)
t.Errorf("failed verify pem file. minikube_test.pem -> %s mismatch (-want +got):\n%s", vp, diff)
}
}
}
@ -827,7 +828,7 @@ func validateCertSync(ctx context.Context, t *testing.T, profile string) {
func validateUpdateContextCmd(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "update-context", "--alsologtostderr", "-v=2"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to run minikube update-context: args %q: %v", rr.Command(), err)
}
want := []byte("IP was already correctly configured")

View File

@ -27,6 +27,7 @@ import (
"k8s.io/minikube/pkg/minikube/vmpath"
)
// TestGuestEnvironment verifies files and packages installed inside minikube ISO/Base image
func TestGuestEnvironment(t *testing.T) {
MaybeParallel(t)
@ -37,18 +38,18 @@ func TestGuestEnvironment(t *testing.T) {
args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=1800", "--wait=false"}, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to start minikube: args %q: %v", rr.Command(), err)
}
// Run as a group so that our defer doesn't happen as tests are runnings
t.Run("Binaries", func(t *testing.T) {
for _, pkg := range []string{"git", "rsync", "curl", "wget", "socat", "iptables", "VBoxControl", "VBoxService"} {
for _, pkg := range []string{"git", "rsync", "curl", "wget", "socat", "iptables", "VBoxControl", "VBoxService", "crictl", "podman", "docker"} {
pkg := pkg
t.Run(pkg, func(t *testing.T) {
t.Parallel()
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("which %s", pkg)))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to verify existence of %q binary : args %q: %v", pkg, rr.Command(), err)
}
})
}
@ -67,9 +68,9 @@ func TestGuestEnvironment(t *testing.T) {
mount := mount
t.Run(mount, func(t *testing.T) {
t.Parallel()
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount)))
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("df -t ext4 %s | grep %s", mount, mount)))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to verify existence of %q mount. args %q: %v", mount, rr.Command(), err)
}
})
}

View File

@ -50,59 +50,59 @@ func TestGvisorAddon(t *testing.T) {
startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--container-runtime=containerd", "--docker-opt", "containerd=/var/run/containerd/containerd.sock"}, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed to start minikube: args %q: %v", rr.Command(), err)
}
// If it exists, include a locally built gvisor image
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", "gcr.io/k8s-minikube/gvisor-addon:2"))
if err != nil {
t.Logf("%s failed: %v (won't test local image)", rr.Args, err)
t.Logf("%s failed: %v (won't test local image)", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "enable", "gvisor"))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil {
t.Fatalf("waiting for gvisor controller to be up: %v", err)
t.Fatalf("failed waiting for 'gvisor controller' pod: %v", err)
}
// Create an untrusted workload
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-untrusted.yaml")))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
// Create gvisor workload
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-gvisor.yaml")))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil {
t.Errorf("nginx: %v", err)
t.Errorf("failed waiting for nginx pod: %v", err)
}
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil {
t.Errorf("nginx: %v", err)
t.Errorf("failed waiting for gvisor pod: %v", err)
}
// Ensure that workloads survive a restart
rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed stopping minikube. args %q : %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed starting minikube after a stop. args %q, %v", rr.Command(), err)
}
if _, err := PodWait(ctx, t, profile, "kube-system", "kubernetes.io/minikube-addons=gvisor", Minutes(4)); err != nil {
t.Errorf("waiting for gvisor controller to be up: %v", err)
t.Errorf("failed waiting for 'gvisor controller' pod : %v", err)
}
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,untrusted=true", Minutes(4)); err != nil {
t.Errorf("nginx: %v", err)
t.Errorf("failed waiting for 'nginx' pod : %v", err)
}
if _, err := PodWait(ctx, t, profile, "default", "run=nginx,runtime=gvisor", Minutes(4)); err != nil {
t.Errorf("nginx: %v", err)
t.Errorf("failed waiting for 'gvisor' pod : %v", err)
}
}

View File

@ -63,14 +63,24 @@ func (rr RunResult) Command() string {
return sb.String()
}
// indentLines indents every line in a bytes.Buffer and returns it as string
func indentLines(b []byte) string {
scanner := bufio.NewScanner(bytes.NewReader(b))
var lines string
for scanner.Scan() {
lines = lines + "\t" + scanner.Text() + "\n"
}
return lines
}
// Output returns human-readable output for an execution result
func (rr RunResult) Output() string {
var sb strings.Builder
if rr.Stdout.Len() > 0 {
sb.WriteString(fmt.Sprintf("-- stdout --\n%s\n-- /stdout --", rr.Stdout.Bytes()))
sb.WriteString(fmt.Sprintf("\n-- stdout --\n%s\n-- /stdout --", indentLines(rr.Stdout.Bytes())))
}
if rr.Stderr.Len() > 0 {
sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", rr.Stderr.Bytes()))
sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", indentLines(rr.Stderr.Bytes())))
}
return sb.String()
}

View File

@ -46,22 +46,22 @@ func TestChangeNoneUser(t *testing.T) {
startArgs := append([]string{"CHANGE_MINIKUBE_NONE_USER=true", Target(), "start", "--wait=false"}, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, "/usr/bin/env", startArgs...))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("%s failed: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("%s failed: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, "/usr/bin/env", startArgs...))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("%s failed: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "status"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("%s failed: %v", rr.Command(), err)
}
username := os.Getenv("SUDO_USER")

View File

@ -92,7 +92,7 @@ func TestStartStop(t *testing.T) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed starting minikube -first start-. args %q: %v", rr.Command(), err)
}
if !strings.Contains(tc.name, "cni") {
@ -101,43 +101,43 @@ func TestStartStop(t *testing.T) {
rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile, "--alsologtostderr", "-v=3"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed stopping minikube - first stop-. args %q: %v", rr.Command(), err)
}
// The none driver never really stops
if !NoneDriver() {
got := Status(ctx, t, Target(), profile, "Host")
if got != state.Stopped.String() {
t.Errorf("post-stop host status = %q; want = %q", got, state.Stopped)
t.Errorf("expected post-stop host status to be -%q- but got *%q*", state.Stopped, got)
}
}
// Enable an addon to assert it comes up afterwards
rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
// Explicit fatal so that failures don't move directly to deletion
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("failed to start minikube post-stop. args %q: %v", rr.Command(), err)
}
if strings.Contains(tc.name, "cni") {
t.Logf("WARNING: cni mode requires additional setup before pods can schedule :(")
} else {
if _, err := PodWait(ctx, t, profile, "default", "integration-test=busybox", Minutes(4)); err != nil {
t.Fatalf("post-stop-start pod wait: %v", err)
t.Fatalf("failed waiting for pod 'busybox' post-stop-start: %v", err)
}
if _, err := PodWait(ctx, t, profile, "kubernetes-dashboard", "k8s-app=kubernetes-dashboard", Minutes(4)); err != nil {
t.Fatalf("post-stop-start addon wait: %v", err)
t.Fatalf("failed waiting for 'addon dashboard' pod post-stop-start: %v", err)
}
}
got := Status(ctx, t, Target(), profile, "Host")
if got != state.Running.String() {
t.Errorf("post-start host status = %q; want = %q", got, state.Running)
t.Errorf("expected host status after start-stop-start to be -%q- but got *%q*", state.Running, got)
}
if !NoneDriver() {
@ -150,7 +150,7 @@ func TestStartStop(t *testing.T) {
// Normally handled by cleanuprofile, but not fatal there
rr, err = Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to clean up: args %q: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "config", "get-contexts", profile))
@ -158,7 +158,7 @@ func TestStartStop(t *testing.T) {
t.Logf("config context error: %v (may be ok)", err)
}
if rr.ExitCode != 1 {
t.Errorf("wanted exit code 1, got %d. output: %s", rr.ExitCode, rr.Output())
t.Errorf("expected exit code 1, got %d. output: %s", rr.ExitCode, rr.Output())
}
}
})
@ -182,14 +182,14 @@ func TestStartStopWithPreload(t *testing.T) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
// Now, pull the busybox image into the VMs docker daemon
image := "busybox"
rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "pull", image))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
// Restart minikube with v1.17.3, which has a preloaded tarball
@ -199,11 +199,11 @@ func TestStartStopWithPreload(t *testing.T) {
startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion))
rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images"))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
if !strings.Contains(rr.Output(), image) {
t.Fatalf("Expected to find %s in output of `docker images`, instead got %s", image, rr.Output())
@ -217,7 +217,7 @@ func testPodScheduling(ctx context.Context, t *testing.T, profile string) {
// schedule a pod to assert persistence
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "busybox.yaml")))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
// 8 minutes, because 4 is not enough for images to pull in all cases.
@ -250,14 +250,14 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version
rr, err := Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "sudo crictl images -o json"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to get images inside minikube. args %q: %v", rr.Command(), err)
}
jv := map[string][]struct {
Tags []string `json:"repoTags"`
}{}
err = json.Unmarshal(rr.Stdout.Bytes(), &jv)
if err != nil {
t.Errorf("images unmarshal: %v", err)
t.Errorf("failed to decode images json %v. output: %q", err, rr.Output())
}
found := map[string]bool{}
for _, img := range jv["images"] {
@ -274,7 +274,7 @@ func testPulledImages(ctx context.Context, t *testing.T, profile string, version
}
want, err := images.Kubeadm("", version)
if err != nil {
t.Errorf("kubeadm images: %v", version)
t.Errorf("failed to get kubeadm images for %s: %v", version, err)
}
gotImages := []string{}
for k := range found {
@ -293,7 +293,7 @@ func testPause(ctx context.Context, t *testing.T, profile string) {
rr, err := Run(t, exec.CommandContext(ctx, Target(), "pause", "-p", profile, "--alsologtostderr", "-v=1"))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
got := Status(ctx, t, Target(), profile, "APIServer")
@ -308,7 +308,7 @@ func testPause(ctx context.Context, t *testing.T, profile string) {
rr, err = Run(t, exec.CommandContext(ctx, Target(), "unpause", "-p", profile, "--alsologtostderr", "-v=1"))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
got = Status(ctx, t, Target(), profile, "APIServer")

View File

@ -82,22 +82,23 @@ func TestVersionUpgrade(t *testing.T) {
rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "stop", "-p", profile))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
t.Fatalf("%s failed: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), "-p", profile, "status", "--format={{.Host}}"))
if err != nil {
t.Logf("status error: %v (may be ok)", err)
}
got := strings.TrimSpace(rr.Stdout.String())
if got != state.Stopped.String() {
t.Errorf("status = %q; want = %q", got, state.Stopped.String())
t.Errorf("FAILED: status = %q; want = %q", got, state.Stopped.String())
}
args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to start minikube HEAD with newest k8s version. args: %s : %v", rr.Command(), err)
}
s, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "version", "--output=json"))
@ -119,14 +120,16 @@ func TestVersionUpgrade(t *testing.T) {
t.Fatalf("expected server version %s is not the same with latest version %s", cv.ServerVersion.GitVersion, constants.NewestKubernetesVersion)
}
t.Logf("Attempting to downgrade Kubernetes (should fail)")
args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
if rr, err := Run(t, exec.CommandContext(ctx, tf.Name(), args...)); err == nil {
t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Args)
t.Fatalf("downgrading kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Command())
}
t.Logf("Attempting restart after unsuccessful downgrade")
args = append([]string{"start", "-p", profile, fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
t.Errorf("failed to restart minikube after unsuccessful downgrade attempt: %v", err)
}
}