diff --git a/test/integration/aaa_download_only_test.go b/test/integration/aaa_download_only_test.go index 4f195eacb3..1b59c7136f 100644 --- a/test/integration/aaa_download_only_test.go +++ b/test/integration/aaa_download_only_test.go @@ -40,153 +40,151 @@ import ( ) func TestDownloadOnly(t *testing.T) { - for _, r := range []string{"crio", "docker", "containerd"} { - t.Run(r, func(t *testing.T) { - // Stores the startup run result for later error messages - var rrr *RunResult + // Stores the startup run result for later error messages + var rrr *RunResult + profile := UniqueProfileName("download-only") + ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) + defer Cleanup(t, profile, cancel) + containerRuntime := ContainerRuntime() - profile := UniqueProfileName(r) - ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) - defer Cleanup(t, profile, cancel) + versions := []string{ + constants.OldestKubernetesVersion, + constants.DefaultKubernetesVersion, + constants.NewestKubernetesVersion, + } - versions := []string{ - constants.OldestKubernetesVersion, - constants.DefaultKubernetesVersion, - constants.NewestKubernetesVersion, - } + // Small optimization, don't run the exact same set of tests twice + if constants.DefaultKubernetesVersion == constants.NewestKubernetesVersion { + versions = versions[:len(versions)-1] + } - // Small optimization, don't run the exact same set of tests twice - if constants.DefaultKubernetesVersion == constants.NewestKubernetesVersion { - versions = versions[:len(versions)-1] - } + for _, v := range versions { + t.Run(v, func(t *testing.T) { + defer PostMortemLogs(t, profile) - for _, v := range versions { - t.Run(v, func(t *testing.T) { - defer PostMortemLogs(t, profile) - - t.Run("check json events", func(t *testing.T) { - // --force to avoid uid check - args := append([]string{"start", "-o=json", "--download-only", "-p", profile, "--force", "--alsologtostderr", fmt.Sprintf("--kubernetes-version=%s", 
v), fmt.Sprintf("--container-runtime=%s", r)}, StartArgs()...) - rt, err := Run(t, exec.CommandContext(ctx, Target(), args...)) - if rrr == nil { - // Preserve the initial run-result for debugging - rrr = rt - } - if err != nil { - t.Errorf("failed to download only. args: %q %v", args, err) - } - - s := bufio.NewScanner(bytes.NewReader(rt.Stdout.Bytes())) - for s.Scan() { - var rtObj map[string]interface{} - err := json.Unmarshal(s.Bytes(), &rtObj) - if err != nil { - t.Errorf("failed to parse output: %v", err) - } else if step, ok := rtObj["data"]; ok { - if stepMap, ok := step.(map[string]interface{}); ok { - if stepMap["currentstep"] == "" { - t.Errorf("Empty step number for %v", stepMap["name"]) - } - } - } - } - }) - - preloadExists := false - t.Run("check preload exists", func(t *testing.T) { - // skip for none, as none driver does not have preload feature. - if NoneDriver() { - t.Skip("None driver does not have preload") - } - if download.PreloadExists(v, r, true) { - // Just make sure the tarball path exists - if _, err := os.Stat(download.TarballPath(v, r)); err != nil { - t.Errorf("failed to verify preloaded tarball file exists: %v", err) - } - preloadExists = true - } else { - t.Skip("No preload image") - } - }) - - t.Run("check cached images", func(t *testing.T) { - // skip verify for cache images if --driver=none - if NoneDriver() { - t.Skip("None driver has no cache") - } - if preloadExists { - t.Skip("Preload exists, images won't be cached") - } - imgs, err := images.Kubeadm("", v) - if err != nil { - t.Errorf("failed to get kubeadm images for %v: %+v", v, err) - } - - for _, img := range imgs { - img = strings.Replace(img, ":", "_", 1) // for example kube-scheduler:v1.15.2 --> kube-scheduler_v1.15.2 - fp := filepath.Join(localpath.MiniPath(), "cache", "images", img) - _, err := os.Stat(fp) - if err != nil { - t.Errorf("expected image file exist at %q but got error: %v", fp, err) - } - } - }) - - t.Run("check binaries", func(t *testing.T) { - // 
checking binaries downloaded (kubelet,kubeadm) - for _, bin := range constants.KubernetesReleaseBinaries { - fp := filepath.Join(localpath.MiniPath(), "cache", "linux", v, bin) - _, err := os.Stat(fp) - if err != nil { - t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) - } - } - }) - - t.Run("check kubectl", func(t *testing.T) { - // If we are on darwin/windows, check to make sure OS specific kubectl has been downloaded - // as well for the `minikube kubectl` command - if runtime.GOOS == "linux" { - t.Skip("Test for darwin and windows") - } - binary := "kubectl" - if runtime.GOOS == "windows" { - binary = "kubectl.exe" - } - fp := filepath.Join(localpath.MiniPath(), "cache", runtime.GOOS, v, binary) - if _, err := os.Stat(fp); err != nil { - t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) - } - }) - }) - } - - // This is a weird place to test profile deletion, but this test is serial, and we have a profile to delete! - t.Run("DeleteAll", func(t *testing.T) { - defer PostMortemLogs(t, profile) - - if !CanCleanup() { - t.Skip("skipping, as cleanup is disabled") + t.Run("check json events", func(t *testing.T) { + // --force to avoid uid check + args := append([]string{"start", "-o=json", "--download-only", "-p", profile, "--force", "--alsologtostderr", fmt.Sprintf("--kubernetes-version=%s", v), fmt.Sprintf("--container-runtime=%s", containerRuntime)}, StartArgs()...) + rt, err := Run(t, exec.CommandContext(ctx, Target(), args...)) + if rrr == nil { + // Preserve the initial run-result for debugging + rrr = rt } - rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all")) if err != nil { - t.Errorf("failed to delete all. args: %q : %v", rr.Command(), err) + t.Errorf("failed to download only. 
args: %q %v", args, err) + } + + s := bufio.NewScanner(bytes.NewReader(rt.Stdout.Bytes())) + for s.Scan() { + var rtObj map[string]interface{} + err := json.Unmarshal(s.Bytes(), &rtObj) + if err != nil { + t.Errorf("failed to parse output: %v", err) + } else if step, ok := rtObj["data"]; ok { + if stepMap, ok := step.(map[string]interface{}); ok { + if stepMap["currentstep"] == "" { + t.Errorf("Empty step number for %v", stepMap["name"]) + } + } + } } }) - // Delete should always succeed, even if previously partially or fully deleted. - t.Run("DeleteAlwaysSucceeds", func(t *testing.T) { - defer PostMortemLogs(t, profile) - if !CanCleanup() { - t.Skip("skipping, as cleanup is disabled") + preloadExists := false + t.Run("check preload exists", func(t *testing.T) { + // skip for none, as none driver does not have preload feature. + if NoneDriver() { + t.Skip("None driver does not have preload") } - rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) - if err != nil { - t.Errorf("failed to delete. 
args: %q: %v", rr.Command(), err) + if download.PreloadExists(v, containerRuntime, true) { + // Just make sure the tarball path exists + if _, err := os.Stat(download.TarballPath(v, containerRuntime)); err != nil { + t.Errorf("failed to verify preloaded tarball file exists: %v", err) + } + preloadExists = true + } else { + t.Skip("No preload image") } }) + + t.Run("check cached images", func(t *testing.T) { + // skip verify for cache images if --driver=none + if NoneDriver() { + t.Skip("None driver has no cache") + } + if preloadExists { + t.Skip("Preload exists, images won't be cached") + } + imgs, err := images.Kubeadm("", v) + if err != nil { + t.Errorf("failed to get kubeadm images for %v: %+v", v, err) + } + + for _, img := range imgs { + img = strings.Replace(img, ":", "_", 1) // for example kube-scheduler:v1.15.2 --> kube-scheduler_v1.15.2 + fp := filepath.Join(localpath.MiniPath(), "cache", "images", img) + _, err := os.Stat(fp) + if err != nil { + t.Errorf("expected image file exist at %q but got error: %v", fp, err) + } + } + }) + + t.Run("check binaries", func(t *testing.T) { + // checking binaries downloaded (kubelet,kubeadm) + for _, bin := range constants.KubernetesReleaseBinaries { + fp := filepath.Join(localpath.MiniPath(), "cache", "linux", v, bin) + _, err := os.Stat(fp) + if err != nil { + t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) + } + } + }) + + t.Run("check kubectl", func(t *testing.T) { + // If we are on darwin/windows, check to make sure OS specific kubectl has been downloaded + // as well for the `minikube kubectl` command + if runtime.GOOS == "linux" { + t.Skip("Test for darwin and windows") + } + binary := "kubectl" + if runtime.GOOS == "windows" { + binary = "kubectl.exe" + } + fp := filepath.Join(localpath.MiniPath(), "cache", runtime.GOOS, v, binary) + if _, err := os.Stat(fp); err != nil { + t.Errorf("expected the file for binary exist at %q but got error %v", fp, err) + } + }) + }) } + + // This 
is a weird place to test profile deletion, but this test is serial, and we have a profile to delete! + t.Run("DeleteAll", func(t *testing.T) { + defer PostMortemLogs(t, profile) + + if !CanCleanup() { + t.Skip("skipping, as cleanup is disabled") + } + rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all")) + if err != nil { + t.Errorf("failed to delete all. args: %q : %v", rr.Command(), err) + } + }) + // Delete should always succeed, even if previously partially or fully deleted. + t.Run("DeleteAlwaysSucceeds", func(t *testing.T) { + defer PostMortemLogs(t, profile) + + if !CanCleanup() { + t.Skip("skipping, as cleanup is disabled") + } + rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile)) + if err != nil { + t.Errorf("failed to delete. args: %q: %v", rr.Command(), err) + } + }) + } func TestDownloadOnlyKic(t *testing.T) { diff --git a/test/integration/aab_offline_test.go b/test/integration/aab_offline_test.go index ced6b5da2d..8943d1b634 100644 --- a/test/integration/aab_offline_test.go +++ b/test/integration/aab_offline_test.go @@ -26,41 +26,34 @@ import ( "testing" ) -// TestOffline makes sure minikube works without internet, once it the user has already cached the images, This test has to run after TestDownloadOnly! +// TestOffline makes sure minikube works without internet once the user has already cached the images. This test has to run after TestDownloadOnly func TestOffline(t *testing.T) { - t.Run("group", func(t *testing.T) { - for _, rt := range []string{"docker", "crio", "containerd"} { - rt := rt - t.Run(rt, func(t *testing.T) { - MaybeParallel(t) + MaybeParallel(t) + rt := ContainerRuntime() + if rt != "docker" && arm64Platform() { + t.Skipf("skipping %s - only docker runtime supported on arm64. See https://github.com/kubernetes/minikube/issues/10144", t.Name()) + } - if rt != "docker" && arm64Platform() { - t.Skipf("skipping %s - only docker runtime supported on arm64. 
See https://github.com/kubernetes/minikube/issues/10144", t.Name()) - } + if rt != "docker" && NoneDriver() { + t.Skipf("skipping %s - incompatible with none driver", t.Name()) + } - if rt != "docker" && NoneDriver() { - t.Skipf("skipping %s - incompatible with none driver", t.Name()) - } + profile := UniqueProfileName(fmt.Sprintf("offline-%s", rt)) + ctx, cancel := context.WithTimeout(context.Background(), Minutes(15)) + defer CleanupWithLogs(t, profile, cancel) - profile := UniqueProfileName(fmt.Sprintf("offline-%s", rt)) - ctx, cancel := context.WithTimeout(context.Background(), Minutes(15)) - defer CleanupWithLogs(t, profile, cancel) + startArgs := []string{"start", "-p", profile, "--alsologtostderr", "-v=1", "--memory=2000", "--wait=true"} + startArgs = append(startArgs, StartArgs()...) + c := exec.CommandContext(ctx, Target(), startArgs...) + env := os.Environ() + // RFC1918 address that is unlikely to host a working proxy server + env = append(env, "HTTP_PROXY=172.16.1.1:1") + env = append(env, "HTTPS_PROXY=172.16.1.1:1") - startArgs := []string{"start", "-p", profile, "--alsologtostderr", "-v=1", "--memory=2000", "--wait=true", "--container-runtime", rt} - startArgs = append(startArgs, StartArgs()...) - c := exec.CommandContext(ctx, Target(), startArgs...) 
- env := os.Environ() - // RFC1918 address that unlikely to host working a proxy server - env = append(env, "HTTP_PROXY=172.16.1.1:1") - env = append(env, "HTTP_PROXYS=172.16.1.1:1") - - c.Env = env - rr, err := Run(t, c) - if err != nil { - // Fatal so that we may collect logs before stop/delete steps - t.Fatalf("%s failed: %v", rr.Command(), err) - } - }) - } - }) + c.Env = env + rr, err := Run(t, c) + if err != nil { + // Fatal so that we may collect logs before stop/delete steps + t.Fatalf("%s failed: %v", rr.Command(), err) + } } diff --git a/test/integration/testdata/mysql.yaml b/test/integration/testdata/mysql.yaml index fd90187228..986430cc76 100644 --- a/test/integration/testdata/mysql.yaml +++ b/test/integration/testdata/mysql.yaml @@ -24,8 +24,15 @@ spec: app: mysql spec: containers: - - image: mysql:5.6 + - image: mysql:5.7 name: mysql + resources: + requests: + memory: "512Mi" + cpu: "600m" + limits: + memory: "700Mi" + cpu: "700m" env: # Use secret in real usage - name: MYSQL_ROOT_PASSWORD