Merge branch 'master' of github.com:kubernetes/minikube into fix-download-test
commit 3e7a72129f
@@ -40,153 +40,151 @@ import (
 )

 func TestDownloadOnly(t *testing.T) {
-    for _, r := range []string{"crio", "docker", "containerd"} {
-        t.Run(r, func(t *testing.T) {
     // Stores the startup run result for later error messages
     var rrr *RunResult
-    profile := UniqueProfileName(r)
+    profile := UniqueProfileName("download-only")
     ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
     defer Cleanup(t, profile, cancel)
+    containerRuntime := ContainerRuntime()

     versions := []string{
         constants.OldestKubernetesVersion,
         constants.DefaultKubernetesVersion,
         constants.NewestKubernetesVersion,
     }

     // Small optimization, don't run the exact same set of tests twice
     if constants.DefaultKubernetesVersion == constants.NewestKubernetesVersion {
         versions = versions[:len(versions)-1]
     }

     for _, v := range versions {
         t.Run(v, func(t *testing.T) {
             defer PostMortemLogs(t, profile)

             t.Run("check json events", func(t *testing.T) {
                 // --force to avoid uid check
-                args := append([]string{"start", "-o=json", "--download-only", "-p", profile, "--force", "--alsologtostderr", fmt.Sprintf("--kubernetes-version=%s", v), fmt.Sprintf("--container-runtime=%s", r)}, StartArgs()...)
+                args := append([]string{"start", "-o=json", "--download-only", "-p", profile, "--force", "--alsologtostderr", fmt.Sprintf("--kubernetes-version=%s", v), fmt.Sprintf("--container-runtime=%s", containerRuntime)}, StartArgs()...)
                 rt, err := Run(t, exec.CommandContext(ctx, Target(), args...))
                 if rrr == nil {
                     // Preserve the initial run-result for debugging
                     rrr = rt
                 }
                 if err != nil {
                     t.Errorf("failed to download only. args: %q %v", args, err)
                 }

                 s := bufio.NewScanner(bytes.NewReader(rt.Stdout.Bytes()))
                 for s.Scan() {
                     var rtObj map[string]interface{}
                     err := json.Unmarshal(s.Bytes(), &rtObj)
                     if err != nil {
                         t.Errorf("failed to parse output: %v", err)
                     } else if step, ok := rtObj["data"]; ok {
                         if stepMap, ok := step.(map[string]interface{}); ok {
                             if stepMap["currentstep"] == "" {
                                 t.Errorf("Empty step number for %v", stepMap["name"])
                             }
                         }
                     }
                 }
             })

             preloadExists := false
             t.Run("check preload exists", func(t *testing.T) {
                 // skip for none, as none driver does not have preload feature.
                 if NoneDriver() {
                     t.Skip("None driver does not have preload")
                 }
-                if download.PreloadExists(v, r, true) {
+                if download.PreloadExists(v, containerRuntime, true) {
                     // Just make sure the tarball path exists
-                    if _, err := os.Stat(download.TarballPath(v, r)); err != nil {
+                    if _, err := os.Stat(download.TarballPath(v, containerRuntime)); err != nil {
                         t.Errorf("failed to verify preloaded tarball file exists: %v", err)
                     }
                     preloadExists = true
                 } else {
                     t.Skip("No preload image")
                 }
             })

             t.Run("check cached images", func(t *testing.T) {
                 // skip verify for cache images if --driver=none
                 if NoneDriver() {
                     t.Skip("None driver has no cache")
                 }
                 if preloadExists {
                     t.Skip("Preload exists, images won't be cached")
                 }
                 imgs, err := images.Kubeadm("", v)
                 if err != nil {
                     t.Errorf("failed to get kubeadm images for %v: %+v", v, err)
                 }

                 for _, img := range imgs {
                     img = strings.Replace(img, ":", "_", 1) // for example kube-scheduler:v1.15.2 --> kube-scheduler_v1.15.2
                     fp := filepath.Join(localpath.MiniPath(), "cache", "images", img)
                     _, err := os.Stat(fp)
                     if err != nil {
                         t.Errorf("expected image file exist at %q but got error: %v", fp, err)
                     }
                 }
             })

             t.Run("check binaries", func(t *testing.T) {
                 // checking binaries downloaded (kubelet,kubeadm)
                 for _, bin := range constants.KubernetesReleaseBinaries {
                     fp := filepath.Join(localpath.MiniPath(), "cache", "linux", v, bin)
                     _, err := os.Stat(fp)
                     if err != nil {
                         t.Errorf("expected the file for binary exist at %q but got error %v", fp, err)
                     }
                 }
             })

             t.Run("check kubectl", func(t *testing.T) {
                 // If we are on darwin/windows, check to make sure OS specific kubectl has been downloaded
                 // as well for the `minikube kubectl` command
                 if runtime.GOOS == "linux" {
                     t.Skip("Test for darwin and windows")
                 }
                 binary := "kubectl"
                 if runtime.GOOS == "windows" {
                     binary = "kubectl.exe"
                 }
                 fp := filepath.Join(localpath.MiniPath(), "cache", runtime.GOOS, v, binary)
                 if _, err := os.Stat(fp); err != nil {
                     t.Errorf("expected the file for binary exist at %q but got error %v", fp, err)
                 }
             })
         })
     }

     // This is a weird place to test profile deletion, but this test is serial, and we have a profile to delete!
     t.Run("DeleteAll", func(t *testing.T) {
         defer PostMortemLogs(t, profile)

         if !CanCleanup() {
             t.Skip("skipping, as cleanup is disabled")
         }
         rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "--all"))
         if err != nil {
             t.Errorf("failed to delete all. args: %q : %v", rr.Command(), err)
         }
     })
     // Delete should always succeed, even if previously partially or fully deleted.
     t.Run("DeleteAlwaysSucceeds", func(t *testing.T) {
         defer PostMortemLogs(t, profile)

         if !CanCleanup() {
             t.Skip("skipping, as cleanup is disabled")
         }
         rr, err := Run(t, exec.CommandContext(ctx, Target(), "delete", "-p", profile))
         if err != nil {
             t.Errorf("failed to delete. args: %q: %v", rr.Command(), err)
         }
     })
-        })
-    }
 }

 func TestDownloadOnlyKic(t *testing.T) {
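For anyone reviewing the "check json events" subtest above: the sketch below replays the same scan-and-unmarshal loop against a single hypothetical event line instead of real `minikube start -o=json` output, just to show what the `currentstep` check expects. The sample line and its field values are illustrative and are not taken from this change.

// Standalone sketch of the "check json events" validation, fed a hypothetical
// sample line rather than real minikube output.
package main

import (
    "bufio"
    "encoding/json"
    "fmt"
    "strings"
)

func main() {
    // Hypothetical sample of the JSON event stream the test scans.
    stdout := `{"type":"io.k8s.sigs.minikube.step","data":{"currentstep":"0","name":"Initial Minikube Setup","totalsteps":"12"}}`

    s := bufio.NewScanner(strings.NewReader(stdout))
    for s.Scan() {
        var obj map[string]interface{}
        if err := json.Unmarshal(s.Bytes(), &obj); err != nil {
            fmt.Println("failed to parse output:", err)
            continue
        }
        if data, ok := obj["data"].(map[string]interface{}); ok {
            // The test fails the run when a step event carries an empty step number.
            if data["currentstep"] == "" {
                fmt.Println("empty step number for", data["name"])
            } else {
                fmt.Println("step", data["currentstep"], "-", data["name"])
            }
        }
    }
}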
@@ -26,41 +26,34 @@ import (
     "testing"
 )

-// TestOffline makes sure minikube works without internet, once it the user has already cached the images, This test has to run after TestDownloadOnly!
+// TestOffline makes sure minikube works without internet, once it the user has already cached the images, This test has to run after TestDownloadOnly
 func TestOffline(t *testing.T) {
-    t.Run("group", func(t *testing.T) {
-        for _, rt := range []string{"docker", "crio", "containerd"} {
-            rt := rt
-            t.Run(rt, func(t *testing.T) {
     MaybeParallel(t)
+    rt := ContainerRuntime()
     if rt != "docker" && arm64Platform() {
         t.Skipf("skipping %s - only docker runtime supported on arm64. See https://github.com/kubernetes/minikube/issues/10144", t.Name())
     }

     if rt != "docker" && NoneDriver() {
         t.Skipf("skipping %s - incompatible with none driver", t.Name())
     }

     profile := UniqueProfileName(fmt.Sprintf("offline-%s", rt))
     ctx, cancel := context.WithTimeout(context.Background(), Minutes(15))
     defer CleanupWithLogs(t, profile, cancel)

-    startArgs := []string{"start", "-p", profile, "--alsologtostderr", "-v=1", "--memory=2000", "--wait=true", "--container-runtime", rt}
+    startArgs := []string{"start", "-p", profile, "--alsologtostderr", "-v=1", "--memory=2000", "--wait=true"}
     startArgs = append(startArgs, StartArgs()...)
     c := exec.CommandContext(ctx, Target(), startArgs...)
     env := os.Environ()
     // RFC1918 address that unlikely to host working a proxy server
     env = append(env, "HTTP_PROXY=172.16.1.1:1")
     env = append(env, "HTTP_PROXYS=172.16.1.1:1")

     c.Env = env
     rr, err := Run(t, c)
     if err != nil {
         // Fatal so that we may collect logs before stop/delete steps
         t.Fatalf("%s failed: %v", rr.Command(), err)
     }
-            })
-        }
-    })
 }
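The common thread in both test files above is dropping the per-runtime loop in favour of a single runtime returned by ContainerRuntime(). That helper belongs to minikube's integration-test framework and is not shown in this diff; the sketch below only illustrates the general pattern (resolve the runtime under test once, default to docker), using a hypothetical MINIKUBE_TEST_CONTAINER_RUNTIME environment variable rather than the real flag handling.

// Sketch of the "pick one runtime per test run" idea, not the real helper.
package main

import (
    "fmt"
    "os"
)

// containerRuntime returns the single runtime this test run targets,
// falling back to docker when nothing is configured.
func containerRuntime() string {
    if rt := os.Getenv("MINIKUBE_TEST_CONTAINER_RUNTIME"); rt != "" { // hypothetical variable name
        return rt
    }
    return "docker"
}

func main() {
    rt := containerRuntime()
    fmt.Printf("running download/offline tests against a single runtime: %s\n", rt)
}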
@@ -24,8 +24,15 @@ spec:
         app: mysql
     spec:
       containers:
-      - image: mysql:5.6
+      - image: mysql:5.7
         name: mysql
+        resources:
+          requests:
+            memory: "512Mi"
+            cpu: "600m"
+          limits:
+            memory: "700Mi"
+            cpu: "700m"
         env:
           # Use secret in real usage
         - name: MYSQL_ROOT_PASSWORD
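A side note on the new requests/limits block in the mysql manifest: the quantity strings follow the standard Kubernetes resource format. The sketch below, independent of this change, parses the same strings with k8s.io/apimachinery to show the units involved ("600m" is 0.6 of a CPU core, "512Mi" is 512*1024*1024 bytes).

// Sketch: parse the manifest's quantity strings to see their numeric values.
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    cpuRequest := resource.MustParse("600m")
    memRequest := resource.MustParse("512Mi")

    // MilliValue reports CPU in thousandths of a core; Value reports bytes for memory.
    fmt.Printf("cpu request: %d millicores\n", cpuRequest.MilliValue()) // 600
    fmt.Printf("memory request: %d bytes\n", memRequest.Value())        // 536870912
}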