Merge pull request #3767 from tstromberg/cache_images_int_test2

caching: Fix containerd, improve console messages, add integration tests
Thomas Strömberg 2019-03-14 08:19:25 -07:00 committed by GitHub
commit 016e3f3178
6 changed files with 40 additions and 58 deletions


@@ -237,7 +237,7 @@ func beginCacheImages(g *errgroup.Group, kVersion string) {
 	if !viper.GetBool(cacheImages) {
 		return
 	}
-	console.OutStyle("caching", "Caching images in the background ...")
+	console.OutStyle("caching", "Downloading Kubernetes %s images in the background ...", kVersion)
 	g.Go(func() error {
 		return machine.CacheImagesForBootstrapper(kVersion, viper.GetString(cmdcfg.Bootstrapper))
 	})
@@ -487,7 +487,7 @@ func waitCacheImages(g *errgroup.Group) {
 	if !viper.GetBool(cacheImages) {
 		return
 	}
-	console.OutStyle("waiting", "Waiting for image caching to complete ...")
+	console.OutStyle("waiting", "Waiting for image downloads to complete ...")
 	if err := g.Wait(); err != nil {
 		glog.Errorln("Error caching images: ", err)
 	}
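Together, these two hunks form the usual errgroup split: the download starts in a background goroutine early in startup, and g.Wait() only blocks once the images are actually needed. A minimal sketch of the pattern, with illustrative names and a sleep standing in for the download (not minikube code):

    package main

    import (
    	"fmt"
    	"time"

    	"golang.org/x/sync/errgroup"
    )

    func main() {
    	var g errgroup.Group

    	// Begin the slow work (e.g. an image download) in the background.
    	g.Go(func() error {
    		time.Sleep(100 * time.Millisecond) // stand-in for the real download
    		return nil
    	})

    	// ... other startup work proceeds concurrently here ...

    	// Block until the background work completes, surfacing any error.
    	if err := g.Wait(); err != nil {
    		fmt.Println("error caching images:", err)
    	}
    }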


@@ -373,9 +373,8 @@ func NewKubeletConfig(k8s config.KubernetesConfig, r cruntime.Manager) (string,
 func (k *KubeadmBootstrapper) UpdateCluster(cfg config.KubernetesConfig) error {
 	if cfg.ShouldLoadCachedImages {
-		err := machine.LoadImages(k.c, constants.GetKubeadmCachedImages(cfg.KubernetesVersion), constants.ImageCacheDir)
-		if err != nil {
-			return errors.Wrap(err, "loading cached images")
+		if err := machine.LoadImages(k.c, constants.GetKubeadmCachedImages(cfg.KubernetesVersion), constants.ImageCacheDir); err != nil {
+			console.Failure("Unable to load cached images: %v", err)
 		}
 	}

 	r, err := cruntime.New(cruntime.Config{Type: cfg.ContainerRuntime, Socket: cfg.CRISocket})
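Note the semantic change here: a cached-image load failure used to abort UpdateCluster via errors.Wrap; it is now downgraded to a console warning, so startup continues and the container runtime pulls any images that failed to load from the cache.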


@@ -80,7 +80,7 @@ func (r *Containerd) Disable() error {
 // LoadImage loads an image into this runtime
 func (r *Containerd) LoadImage(path string) error {
 	glog.Infof("Loading image: %s", path)
-	return r.Runner.Run(fmt.Sprintf("sudo ctr cri load %s", path))
+	return r.Runner.Run(fmt.Sprintf("sudo ctr images import %s", path))
 }

 // KubeletOptions returns kubelet options for a containerd
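This is the containerd fix from the commit title: "ctr cri load" belongs to the cri plugin's namespace and fails where the stock ctr binary lacks that subcommand, while "ctr images import" is the standard tarball-import subcommand. A stub-runner test could pin down the generated command; the sketch below is hypothetical and assumes only what the call above shows, namely that Runner is satisfied by a Run(string) error method (minikube's real runner interface may require more methods):

    // stubRunner records the last command it was asked to run (hypothetical helper).
    type stubRunner struct{ got string }

    func (s *stubRunner) Run(cmd string) error { s.got = cmd; return nil }

    func TestLoadImageCommand(t *testing.T) {
    	s := &stubRunner{}
    	r := &Containerd{Runner: s}
    	if err := r.LoadImage("/tmp/pause.tar"); err != nil {
    		t.Fatal(err)
    	}
    	want := "sudo ctr images import /tmp/pause.tar"
    	if s.got != want {
    		t.Errorf("LoadImage ran %q, want %q", s.got, want)
    	}
    }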


@@ -27,8 +27,8 @@ import (
 )

 func TestFunctional(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
-	minikubeRunner.EnsureRunning()
+	r := NewMinikubeRunner(t)
+	r.EnsureRunning()
 	// This one is not parallel, and ensures the cluster comes up
 	// before we run any other tests.
 	t.Run("Status", testClusterStatus)
@@ -41,7 +41,7 @@ func TestFunctional(t *testing.T) {
 	t.Run("Provisioning", testProvisioning)
 	t.Run("Tunnel", testTunnel)

-	if !usingNoneDriver(minikubeRunner) {
+	if !usingNoneDriver(r) {
 		t.Run("EnvVars", testClusterEnv)
 		t.Run("SSH", testClusterSSH)
 		t.Run("IngressController", testIngressController)
@@ -50,25 +50,22 @@ func TestFunctional(t *testing.T) {
 }

 func TestFunctionalContainerd(t *testing.T) {
-	minikubeRunner := NewMinikubeRunner(t)
+	r := NewMinikubeRunner(t)

-	if usingNoneDriver(minikubeRunner) {
+	if usingNoneDriver(r) {
 		t.Skip("Can't run containerd backend with none driver")
 	}

-	if minikubeRunner.GetStatus() != state.None.String() {
-		minikubeRunner.RunCommand("delete", true)
+	if r.GetStatus() != state.None.String() {
+		r.RunCommand("delete", true)
 	}

-	minikubeRunner.SetRuntime("containerd")
-	minikubeRunner.EnsureRunning()
+	r.Start("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
 	t.Run("Gvisor", testGvisor)
 	t.Run("GvisorRestart", testGvisorRestart)
-	minikubeRunner.RunCommand("delete", true)
+	r.RunCommand("delete", true)
 }

 // usingNoneDriver returns true if using the none driver
-func usingNoneDriver(runner util.MinikubeRunner) bool {
-	return strings.Contains(runner.StartArgs, "--vm-driver=none")
+func usingNoneDriver(r util.MinikubeRunner) bool {
+	return strings.Contains(r.StartArgs, "--vm-driver=none")
 }
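With SetRuntime gone, a test now selects its runtime by passing flags straight to Start. A new runtime-specific test would take roughly this shape (illustrative only, not part of this change):

    func TestFunctionalCRIO(t *testing.T) { // hypothetical example
    	r := NewMinikubeRunner(t)
    	r.Start("--container-runtime=crio", "--cache-images")
    	defer r.RunCommand("delete", true)
    	r.CheckStatus(state.Running.String())
    }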


@@ -30,48 +30,47 @@ import (
 func TestStartStop(t *testing.T) {
 	tests := []struct {
-		runtime string
+		name string
+		args []string
 	}{
-		{runtime: "docker"},
-		{runtime: "containerd"},
-		{runtime: "crio"},
+		{"docker+cache", []string{"--container-runtime=docker", "--cache-images"}},
+		{"containerd+cache", []string{"--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock", "--cache-images"}},
+		{"crio+cache", []string{"--container-runtime=crio", "--cache-images"}},
 	}

 	for _, test := range tests {
-		t.Run(test.runtime, func(t *testing.T) {
-			runner := NewMinikubeRunner(t)
-			if test.runtime != "docker" && usingNoneDriver(runner) {
-				t.Skipf("skipping, can't use %s with none driver", test.runtime)
+		t.Run(test.name, func(t *testing.T) {
+			r := NewMinikubeRunner(t)
+			if !strings.Contains(test.name, "docker") && usingNoneDriver(r) {
+				t.Skipf("skipping %s - incompatible with none driver", test.name)
 			}

-			runner.RunCommand("config set WantReportErrorPrompt false", true)
-			runner.RunCommand("delete", false)
-			runner.CheckStatus(state.None.String())
+			r.RunCommand("config set WantReportErrorPrompt false", true)
+			r.RunCommand("delete", false)
+			r.CheckStatus(state.None.String())
+			r.Start(test.args...)
+			r.CheckStatus(state.Running.String())

-			runner.SetRuntime(test.runtime)
-			runner.Start()
-			runner.CheckStatus(state.Running.String())
-			ip := runner.RunCommand("ip", true)
+			ip := r.RunCommand("ip", true)
 			ip = strings.TrimRight(ip, "\n")
 			if net.ParseIP(ip) == nil {
 				t.Fatalf("IP command returned an invalid address: %s", ip)
 			}

 			checkStop := func() error {
-				runner.RunCommand("stop", true)
-				return runner.CheckStatusNoFail(state.Stopped.String())
+				r.RunCommand("stop", true)
+				return r.CheckStatusNoFail(state.Stopped.String())
 			}

 			if err := util.Retry(t, checkStop, 5*time.Second, 6); err != nil {
 				t.Fatalf("timed out while checking stopped status: %v", err)
 			}

-			runner.Start()
-			runner.CheckStatus(state.Running.String())
+			r.Start(test.args...)
+			r.CheckStatus(state.Running.String())

-			runner.RunCommand("delete", true)
-			runner.CheckStatus(state.None.String())
+			r.RunCommand("delete", true)
+			r.CheckStatus(state.None.String())
 		})
 	}
 }
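This table change is what adds the caching coverage: each case now has an explicit name and carries its own flag list, and every runtime is exercised with --cache-images so the new download path runs end to end. Extending coverage becomes a one-line table entry; a hypothetical no-cache control case would look like {"docker+nocache", []string{"--container-runtime=docker", "--cache-images=false"}}.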


@@ -184,11 +184,6 @@ func (m *MinikubeRunner) RunDaemon2(command string) (*exec.Cmd, *bufio.Reader, *
 	return cmd, bufio.NewReader(stdoutPipe), bufio.NewReader(stderrPipe)
 }

-// SetRuntime saves the runtime backend
-func (m *MinikubeRunner) SetRuntime(runtime string) {
-	m.Runtime = runtime
-}
-
 func (m *MinikubeRunner) SSH(command string) (string, error) {
 	path, _ := filepath.Abs(m.BinaryPath)
 	cmd := exec.Command(path, "ssh", command)
@@ -202,17 +197,9 @@ func (m *MinikubeRunner) SSH(command string) (string, error) {
 	return string(stdout), nil
 }

-func (m *MinikubeRunner) Start() {
-	opts := ""
-	// TODO(tstromberg): Deprecate this in favor of making it possible for tests to define explicit flags.
-	switch r := m.Runtime; r {
-	case "containerd":
-		opts = "--container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock"
-	case "crio":
-		opts = "--container-runtime=cri-o"
-	}
-	m.RunCommand(fmt.Sprintf("start %s %s %s --alsologtostderr --v=5", m.StartArgs, m.Args, opts), true)
+func (m *MinikubeRunner) Start(opts ...string) {
+	cmd := fmt.Sprintf("start %s %s %s --alsologtostderr --v=2", m.StartArgs, m.Args, strings.Join(opts, " "))
+	m.RunCommand(cmd, true)
 }

 func (m *MinikubeRunner) EnsureRunning() {
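The rewritten Start resolves the old TODO by pushing flag selection out to callers: strings.Join flattens the variadic opts into the single command string handed to RunCommand, which is why a flag and its value can travel together in one element (as in the "--docker-opt containerd=..." argument above). The log verbosity also drops from --v=5 to --v=2, trimming integration-test output.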