Refactor and centralize container runtime code into a cruntime package

pull/3633/head
Thomas Stromberg 2019-02-05 14:42:12 -08:00
parent 854d77ac99
commit 966b245dc6
17 changed files with 267 additions and 432 deletions

14
Gopkg.lock generated
View File

@ -160,6 +160,19 @@
pruneopts = "NUT"
revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
[[projects]]
digest = "1:2e3c336fc7fde5c984d2841455a658a6d626450b1754a854b3b32e7a8f49a07a"
name = "github.com/google/go-cmp"
packages = [
"cmp",
"cmp/internal/diff",
"cmp/internal/function",
"cmp/internal/value",
]
pruneopts = "NUT"
revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
version = "v0.2.0"
[[projects]]
digest = "1:186f7de0e878b5ff1fca82271ce36a7abf9747be09d03b3f08a921584c2f26fc"
name = "github.com/google/go-containerregistry"
@ -933,6 +946,7 @@
"github.com/docker/machine/libmachine/swarm",
"github.com/docker/machine/libmachine/version",
"github.com/golang/glog",
"github.com/google/go-cmp/cmp",
"github.com/google/go-containerregistry/pkg/authn",
"github.com/google/go-containerregistry/pkg/name",
"github.com/google/go-containerregistry/pkg/v1/remote",

View File

@ -110,3 +110,7 @@
go-tests = true
non-go = true
unused-packages = true
[[constraint]]
name = "github.com/google/go-cmp"
version = "0.2.0"

View File

@ -95,11 +95,11 @@ func SetBool(m config.MinikubeConfig, name string, val string) error {
return nil
}
// EnableOrDisableAddon updates addon status executing any commands necessary
func EnableOrDisableAddon(name string, val string) error {
enable, err := strconv.ParseBool(val)
if err != nil {
errors.Wrapf(err, "error attempted to parse enabled/disable value addon %s", name)
return errors.Wrapf(err, "parsing bool: %s", name)
}
//TODO(r2d4): config package should not reference API, pull this out
@ -111,28 +111,25 @@ func EnableOrDisableAddon(name string, val string) error {
defer api.Close()
cluster.EnsureMinikubeRunningOrExit(api, 0)
addon, _ := assets.Addons[name] // validation done prior
if err != nil {
return err
}
addon := assets.Addons[name]
host, err := cluster.CheckIfHostExistsAndLoad(api, config.GetMachineName())
if err != nil {
return errors.Wrap(err, "getting host")
}
cmd, err := machine.GetCommandRunner(host)
cmd, err := machine.CommandRunner(host)
if err != nil {
return errors.Wrap(err, "getting command runner")
return errors.Wrap(err, "command runner")
}
if enable {
for _, addon := range addon.Assets {
if err := cmd.Copy(addon); err != nil {
return errors.Wrapf(err, "error enabling addon %s", addon.AssetName)
return errors.Wrapf(err, "enabling addon %s", addon.AssetName)
}
}
} else {
for _, addon := range addon.Assets {
if err := cmd.Remove(addon); err != nil {
return errors.Wrapf(err, "error disabling addon %s", addon.AssetName)
return errors.Wrapf(err, "disabling addon %s", addon.AssetName)
}
}
}

View File

@ -24,11 +24,12 @@ import (
"strconv"
"strings"
"github.com/docker/go-units"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
)
func IsValidDriver(string, driver string) error {
@ -128,12 +129,18 @@ func IsValidAddon(name string, val string) error {
return errors.Errorf("Cannot enable/disable invalid addon %s", name)
}
// IsContainerdRuntime is a validator which returns an error if the current runtime is not containerd
func IsContainerdRuntime(_, _ string) error {
config, err := config.Load()
if err != nil {
return fmt.Errorf("error getting cluster config: %v", err)
return fmt.Errorf("config.Load: %v", err)
}
if config.KubernetesConfig.ContainerRuntime != constants.ContainerdRuntime {
r, err := cruntime.New(cruntime.Config{Type: config.KubernetesConfig.ContainerRuntime})
if err != nil {
return err
}
_, ok := r.(*cruntime.Containerd)
if !ok {
return fmt.Errorf(`This addon can only be enabled with the containerd runtime backend.
To enable this backend, please first stop minikube with:

View File

@ -40,6 +40,7 @@ import (
"k8s.io/minikube/pkg/minikube/cluster"
cfg "k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/machine"
pkgutil "k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/kubeconfig"
@ -99,22 +100,6 @@ assumes you have already installed one of the VM drivers: virtualbox/vmwarefusio
Run: runStart,
}
// SetContainerRuntime possibly sets the container runtime
func SetContainerRuntime(cfg map[string]string, runtime string) map[string]string {
switch runtime {
case "crio", "cri-o":
cfg["runtime-endpoint"] = "unix:///var/run/crio/crio.sock"
cfg["image-endpoint"] = "unix:///var/run/crio/crio.sock"
case "containerd":
cfg["runtime-endpoint"] = "unix:///run/containerd/containerd.sock"
cfg["image-endpoint"] = "unix:///run/containerd/containerd.sock"
default:
return nil
}
return cfg
}
func runStart(cmd *cobra.Command, args []string) {
if glog.V(8) {
glog.Infoln("Viper configuration:")
@ -219,20 +204,6 @@ func runStart(cmd *cobra.Command, args []string) {
cmdutil.MaybeReportErrorAndExit(err)
}
// common config (currently none)
var cricfg = map[string]string{}
selectedContainerRuntime := viper.GetString(containerRuntime)
if cricfg := SetContainerRuntime(cricfg, selectedContainerRuntime); cricfg != nil {
var command string
fmt.Println("Writing crictl config...")
if command, err = cmdutil.GetCrictlConfigCommand(cricfg); err == nil {
_, err = host.RunSSHCommand(command)
}
if err != nil {
glog.Errorln("Error writing crictl config: ", err)
}
}
selectedKubernetesVersion := viper.GetString(kubernetesVersion)
if strings.Compare(selectedKubernetesVersion, "") == 0 {
selectedKubernetesVersion = constants.DefaultKubernetesVersion
@ -255,6 +226,7 @@ func runStart(cmd *cobra.Command, args []string) {
}
}
selectedContainerRuntime := viper.GetString(containerRuntime)
kubernetesConfig := cfg.KubernetesConfig{
KubernetesVersion: selectedKubernetesVersion,
NodeIP: ip,
@ -274,11 +246,6 @@ func runStart(cmd *cobra.Command, args []string) {
EnableDefaultCNI: viper.GetBool(enableDefaultCNI),
}
k8sBootstrapper, err := GetClusterBootstrapper(api, clusterBootstrapper)
if err != nil {
glog.Exitf("Error getting cluster bootstrapper: %v", err)
}
// Write profile cluster configuration to file
clusterConfig = cfg.Config{
MachineConfig: config,
@ -297,14 +264,17 @@ func runStart(cmd *cobra.Command, args []string) {
}
fmt.Println("Moving files into cluster...")
if err := k8sBootstrapper.UpdateCluster(kubernetesConfig); err != nil {
bs, err := GetClusterBootstrapper(api, clusterBootstrapper)
if err != nil {
glog.Exitf("Error getting cluster bootstrapper: %v", err)
}
if err := bs.UpdateCluster(kubernetesConfig); err != nil {
glog.Errorln("Error updating cluster: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
fmt.Println("Setting up certs...")
if err := k8sBootstrapper.SetupCerts(kubernetesConfig); err != nil {
if err := bs.SetupCerts(kubernetesConfig); err != nil {
glog.Errorln("Error configuring authentication: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
@ -318,8 +288,6 @@ func runStart(cmd *cobra.Command, args []string) {
kubeHost = strings.Replace(kubeHost, ":2376", ":"+strconv.Itoa(kubernetesConfig.NodePort), -1)
fmt.Println("Setting up kubeconfig...")
// setup kubeconfig
kubeConfigFile := cmdutil.GetKubeConfigPath()
kubeCfgSetup := &kubeconfig.KubeConfigSetup{
@ -338,49 +306,19 @@ func runStart(cmd *cobra.Command, args []string) {
cmdutil.MaybeReportErrorAndExit(err)
}
fmt.Println("Stopping extra container runtimes...")
if config.VMDriver != constants.DriverNone && selectedContainerRuntime != "" {
if _, err := host.RunSSHCommand("sudo systemctl stop docker"); err == nil {
_, err = host.RunSSHCommand("sudo systemctl stop docker.socket")
}
if err != nil {
glog.Errorf("Error stopping docker: %v", err)
}
}
if config.VMDriver != constants.DriverNone && (selectedContainerRuntime != constants.CrioRuntime && selectedContainerRuntime != constants.Cri_oRuntime) {
if _, err := host.RunSSHCommand("sudo systemctl stop crio"); err != nil {
glog.Errorf("Error stopping crio: %v", err)
}
}
if config.VMDriver != constants.DriverNone && selectedContainerRuntime != constants.RktRuntime {
if _, err := host.RunSSHCommand("sudo systemctl stop rkt-api"); err == nil {
_, err = host.RunSSHCommand("sudo systemctl stop rkt-metadata")
}
if err != nil {
glog.Errorf("Error stopping rkt: %v", err)
}
}
if config.VMDriver != constants.DriverNone && selectedContainerRuntime != constants.ContainerdRuntime {
if _, err = host.RunSSHCommand("sudo systemctl stop containerd"); err != nil {
glog.Errorf("Error stopping containerd: %v", err)
}
runner, err := machine.CommandRunner(host)
if err != nil {
cmdutil.MaybeReportErrorAndExit(err)
}
if config.VMDriver != constants.DriverNone && (selectedContainerRuntime == constants.CrioRuntime || selectedContainerRuntime == constants.Cri_oRuntime) {
fmt.Println("Restarting crio runtime...")
// restart crio so that it can monitor all hook dirs
if _, err := host.RunSSHCommand("sudo systemctl restart crio"); err != nil {
glog.Errorf("Error restarting crio: %v", err)
}
cr, err := cruntime.New(cruntime.Config{Type: selectedContainerRuntime, Runner: runner})
if err != nil {
cmdutil.MaybeReportErrorAndExit(err)
}
if config.VMDriver != constants.DriverNone && selectedContainerRuntime == constants.ContainerdRuntime {
fmt.Println("Restarting containerd runtime...")
// restart containerd so that it can install all plugins
if _, err := host.RunSSHCommand("sudo systemctl restart containerd"); err != nil {
glog.Errorf("Error restarting containerd: %v", err)
}
fmt.Printf("Configuring %s runtime...\n", cr.Name())
err = cr.Enable()
if err != nil {
cmdutil.MaybeReportErrorAndExit(err)
}
if config.VMDriver == constants.DriverNone {
@ -413,13 +351,13 @@ This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_
if !exists || config.VMDriver == constants.DriverNone {
fmt.Println("Starting cluster components...")
if err := k8sBootstrapper.StartCluster(kubernetesConfig); err != nil {
if err := bs.StartCluster(kubernetesConfig); err != nil {
glog.Errorf("Error starting cluster: %v", err)
cmdutil.MaybeReportErrorAndExit(err)
}
} else {
fmt.Println("Machine exists, restarting cluster components...")
if err := k8sBootstrapper.RestartCluster(kubernetesConfig); err != nil {
if err := bs.RestartCluster(kubernetesConfig); err != nil {
glog.Errorln("Error restarting cluster: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
@ -428,7 +366,7 @@ This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_
// Block until the cluster is healthy.
fmt.Print("Verifying kubelet health ...")
kStat := func() (err error) {
st, err := k8sBootstrapper.GetKubeletStatus()
st, err := bs.GetKubeletStatus()
if err != nil || st != state.Running.String() {
fmt.Printf(".")
return &pkgutil.RetriableError{Err: fmt.Errorf("kubelet unhealthy: %v: %s", err, st)}
@ -442,7 +380,7 @@ This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_
}
fmt.Print("\nVerifying apiserver health ...")
aStat := func() (err error) {
st, err := k8sBootstrapper.GetApiServerStatus(net.ParseIP(ip))
st, err := bs.GetApiServerStatus(net.ParseIP(ip))
if err != nil || st != state.Running.String() {
fmt.Print(".")
return &pkgutil.RetriableError{Err: fmt.Errorf("apiserver status=%s err=%v", st, err)}
@ -497,7 +435,6 @@ This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_
fmt.Println("Unable to load cached images from config file.")
}
fmt.Println("\n\nEverything looks great. Please enjoy minikube!")
return
}
func init() {
@ -526,12 +463,13 @@ func init() {
startCmd.Flags().String(serviceCIDR, pkgutil.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.")
startCmd.Flags().StringSliceVar(&insecureRegistry, "insecure-registry", nil, "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.")
startCmd.Flags().StringSliceVar(&registryMirror, "registry-mirror", nil, "Registry mirrors to pass to the Docker daemon")
startCmd.Flags().String(containerRuntime, "", "The container runtime to be used")
startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd, rkt)")
startCmd.Flags().String(criSocket, "", "The cri socket path to be used")
startCmd.Flags().String(kubernetesVersion, constants.DefaultKubernetesVersion, "The kubernetes version that the minikube VM will use (ex: v1.2.3)")
startCmd.Flags().String(networkPlugin, "", "The name of the network plugin")
startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\"")
startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
// TODO(tstromberg): Flip cacheImages to true once it can be stabilized
startCmd.Flags().Bool(cacheImages, false, "If true, cache docker images for the current bootstrapper and load them into the machine.")
startCmd.Flags().Var(&extraOptions, "extra-config",
`A set of key=value pairs that describe configuration that may be passed to different components.

View File

@ -27,14 +27,11 @@ import (
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"text/template"
"time"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"github.com/pkg/errors"
@ -231,33 +228,6 @@ minikube config set WantKubectlDownloadMsg false
}
}
// Return a command to run, that will generate the crictl config file
func GetCrictlConfigCommand(cfg map[string]string) (string, error) {
var (
crictlYamlTmpl = `runtime-endpoint: {{.RuntimeEndpoint}}
image-endpoint: {{.ImageEndpoint}}
`
crictlYamlPath = "/etc/crictl.yaml"
)
t, err := template.New("crictlYaml").Parse(crictlYamlTmpl)
if err != nil {
return "", err
}
opts := struct {
RuntimeEndpoint string
ImageEndpoint string
}{
RuntimeEndpoint: cfg["runtime-endpoint"],
ImageEndpoint: cfg["image-endpoint"],
}
var crictlYamlBuf bytes.Buffer
if err := t.Execute(&crictlYamlBuf, opts); err != nil {
return "", err
}
return fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(crictlYamlPath), crictlYamlBuf.String(), crictlYamlPath), nil
}
// Ask the kernel for a free open port that is ready to use
func GetPort() (string, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")

View File

@ -17,54 +17,68 @@ limitations under the License.
package none
import (
"bytes"
"fmt"
"os/exec"
"strings"
"github.com/golang/glog"
"github.com/docker/machine/libmachine/drivers"
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/net"
pkgdrivers "k8s.io/minikube/pkg/drivers"
// TODO(tstromberg): Extract CommandRunner into its own package
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/cruntime"
)
const driverName = "none"
const dockerstopcmd = `docker kill $(docker ps -a --filter="name=k8s_" --format="{{.ID}}")`
var dockerkillcmd = fmt.Sprintf(`docker rm $(%s)`, dockerstopcmd)
// cleanupPaths are paths to be removed by cleanup, and are used by both kubeadm and minikube.
var cleanupPaths = []string{
"/data/minikube",
"/etc/kubernetes/manifests",
"/var/lib/minikube",
}
// none Driver is a driver designed to run kubeadm w/o a VM
// none Driver is a driver designed to run kubeadm w/o VM management, and assumes systemctl.
// https://github.com/kubernetes/minikube/blob/master/docs/vmdriver-none.md
type Driver struct {
*drivers.BaseDriver
*pkgdrivers.CommonDriver
URL string
URL string
runtime cruntime.Manager
exec bootstrapper.CommandRunner
}
// Config is configuration for the None driver
type Config struct {
MachineName string
StorePath string
ContainerRuntime string
}
func NewDriver(hostName, storePath string) *Driver {
// NewDriver returns a fully configured None driver
func NewDriver(c Config) *Driver {
runner := &bootstrapper.ExecRunner{}
runtime, err := cruntime.New(cruntime.Config{Type: c.ContainerRuntime, Runner: runner})
// Libraries shouldn't panic, but there is no way for drivers to return error :(
if err != nil {
glog.Fatalf("unable to create container runtime: %v", err)
}
return &Driver{
BaseDriver: &drivers.BaseDriver{
MachineName: hostName,
StorePath: storePath,
MachineName: c.MachineName,
StorePath: c.StorePath,
},
runtime: runtime,
exec: runner,
}
}
// PreCreateCheck checks for correct privileges and dependencies
func (d *Driver) PreCreateCheck() error {
if d.ContainerRuntime == "" {
// check that docker is on path
_, err := exec.LookPath("docker")
if err != nil {
return errors.Wrap(err, "docker cannot be found on the path for this machine. "+
"A docker installation is a requirement for using the none driver")
}
}
return nil
return d.runtime.Available()
}
func (d *Driver) Create() error {
@ -77,6 +91,7 @@ func (d *Driver) DriverName() string {
return driverName
}
// GetIP returns an IP or hostname that this host is available at
func (d *Driver) GetIP() (string, error) {
ip, err := net.ChooseBindAddress(nil)
if err != nil {
@ -85,87 +100,73 @@ func (d *Driver) GetIP() (string, error) {
return ip.String(), nil
}
// GetSSHHostname returns hostname for use with ssh
func (d *Driver) GetSSHHostname() (string, error) {
return "", fmt.Errorf("driver does not support ssh commands")
}
// GetSSHPort returns port for use with ssh
func (d *Driver) GetSSHPort() (int, error) {
return 0, fmt.Errorf("driver does not support ssh commands")
}
// GetURL returns a Docker compatible host URL for connecting to this host
// e.g. tcp://1.2.3.4:2376
func (d *Driver) GetURL() (string, error) {
ip, err := d.GetIP()
if err != nil {
return "", err
}
return fmt.Sprintf("tcp://%s:2376", ip), nil
}
// GetState returns the state that the host is in (running, stopped, etc)
func (d *Driver) GetState() (state.State, error) {
var statuscmd = fmt.Sprintf(
`sudo systemctl is-active kubelet &>/dev/null && echo "Running" || echo "Stopped"`)
out, err := runCommand(statuscmd, true)
if err != nil {
return state.None, err
}
s := strings.TrimSpace(out)
if state.Running.String() == s {
return state.Running, nil
} else if state.Stopped.String() == s {
if err := runningKubelet(d.exec); err != nil {
glog.Infof("kubelet not running: %v", err)
return state.Stopped, nil
} else {
return state.None, fmt.Errorf("Error: Unrecognize output from GetState: %s", s)
}
return state.Running, nil
}
// Kill stops a host forcefully, including any containers that we are managing.
func (d *Driver) Kill() error {
for _, cmdStr := range [][]string{
{"systemctl", "stop", "kubelet.service"},
{"rm", "-rf", "/var/lib/minikube"},
} {
cmd := exec.Command("sudo", cmdStr...)
if out, err := cmd.CombinedOutput(); err != nil {
glog.Warningf("Error %v running command: %s. Output: %s", err, cmdStr, out)
}
if err := stopKubelet(d.exec); err != nil {
return errors.Wrap(err, "kubelet")
}
containers, err := d.runtime.ListContainers(cruntime.MinikubeContainerPrefix)
if err != nil {
return errors.Wrap(err, "containers")
}
// Try to be graceful before sending SIGKILL everywhere.
if err := d.runtime.StopContainers(containers); err != nil {
return errors.Wrap(err, "stop")
}
if err := d.runtime.KillContainers(containers); err != nil {
return errors.Wrap(err, "kill")
}
return nil
}
// Remove a host, including any data which may have been written by it.
func (d *Driver) Remove() error {
rmCmd := `sudo systemctl stop kubelet.service
sudo rm -rf /data/minikube
sudo rm -rf /etc/kubernetes/manifests
sudo rm -rf /var/lib/minikube || true`
for _, cmdStr := range []string{rmCmd} {
if out, err := runCommand(cmdStr, true); err != nil {
glog.Warningf("Error %v running command: %s, Output: %s", err, cmdStr, out)
}
if err := d.Kill(); err != nil {
return errors.Wrap(err, "kill")
}
if d.ContainerRuntime == "" {
if out, err := runCommand(dockerkillcmd, true); err != nil {
glog.Warningf("Error %v running command: %s, Output: %s", err, dockerkillcmd, out)
}
// TODO(tstromberg): Make sure this calls into the bootstrapper to perform `kubeadm reset`
cmd := fmt.Sprintf("sudo rm -rf %s", strings.Join(cleanupPaths, " "))
if err := d.exec.Run(cmd); err != nil {
glog.Errorf("cleanup incomplete: %v", err)
}
return nil
}
// Restart a host
func (d *Driver) Restart() error {
restartCmd := `
if systemctl is-active kubelet.service; then
sudo systemctl restart kubelet.service
fi`
cmd := exec.Command(restartCmd)
if err := cmd.Start(); err != nil {
return err
}
return nil
return restartKubelet(d.exec)
}
// Start a host
func (d *Driver) Start() error {
var err error
d.IPAddress, err = d.GetIP()
@ -179,50 +180,40 @@ func (d *Driver) Start() error {
return nil
}
// Stop a host gracefully, including any containers that we are managing.
func (d *Driver) Stop() error {
var stopcmd = fmt.Sprintf("if [[ `systemctl` =~ -\\.mount ]] &>/dev/null; " + `then
for svc in "kubelet"; do
sudo systemctl stop "$svc".service || true
done
fi
`)
_, err := runCommand(stopcmd, false)
if err != nil {
if err := stopKubelet(d.exec); err != nil {
return err
}
for {
s, err := d.GetState()
if err != nil {
return err
}
if s != state.Running {
break
}
containers, err := d.runtime.ListContainers(cruntime.MinikubeContainerPrefix)
if err != nil {
return errors.Wrap(err, "containers")
}
if d.ContainerRuntime == "" {
if out, err := runCommand(dockerstopcmd, false); err != nil {
glog.Warningf("Error %v running command %s. Output: %s", err, dockerstopcmd, out)
}
if err := d.runtime.StopContainers(containers); err != nil {
return errors.Wrap(err, "stop")
}
return nil
}
// RunSSHCommandFromDriver implements direct ssh control to the driver
func (d *Driver) RunSSHCommandFromDriver() error {
return fmt.Errorf("driver does not support ssh commands")
}
func runCommand(command string, sudo bool) (string, error) {
cmd := exec.Command("/bin/bash", "-c", command)
if sudo {
cmd = exec.Command("sudo", "/bin/bash", "-c", command)
}
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
return "", errors.Wrap(err, stderr.String())
}
return out.String(), nil
// stopKubelet idempotently stops the kubelet
func stopKubelet(exec bootstrapper.CommandRunner) error {
glog.Infof("stopping kubelet.service ...")
return exec.Run("sudo systemctl stop kubelet.service")
}
// restartKubelet restarts the kubelet
func restartKubelet(exec bootstrapper.CommandRunner) error {
glog.Infof("restarting kubelet.service ...")
return exec.Run("sudo systemctl restart kubelet.service")
}
// runningKubelet returns an error if the kubelet is not running.
func runningKubelet(exec bootstrapper.CommandRunner) error {
glog.Infof("checking for running kubelet ...")
return exec.Run("systemctl is-active --quiet service kubelet")
}

View File

@ -26,6 +26,8 @@ import (
// Bootstrapper contains all the methods needed to bootstrap a kubernetes cluster
type Bootstrapper interface {
// PullImages pulls images necessary for a cluster. Success should not be required.
PullImages(config.KubernetesConfig) error
StartCluster(config.KubernetesConfig) error
UpdateCluster(config.KubernetesConfig) error
RestartCluster(config.KubernetesConfig) error

View File

@ -40,11 +40,33 @@ import (
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/sshutil"
"k8s.io/minikube/pkg/util"
)
// SkipPreflights are preflight checks we always skip.
var SkipPreflights = []string{
// We use --ignore-preflight-errors=DirAvailable since we have our own custom addons
// that we also stick in /etc/kubernetes/manifests
"DirAvailable--etc-kubernetes-manifests",
"DirAvailable--data-minikube",
"Port-10250",
"FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml",
"FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml",
"FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml",
"FileAvailable--etc-kubernetes-manifests-etcd.yaml",
// We use --ignore-preflight-errors=Swap since minikube.iso allocates a swap partition.
// (it should probably stop doing this, though...)
"Swap",
// We use --ignore-preflight-errors=CRI since /var/run/dockershim.sock is not present.
// (because we start kubelet with an invalid config)
"CRI",
}
// SkipAdditionalPreflights are additional preflights we skip depending on the runtime in use.
var SkipAdditionalPreflights = map[string][]string{}
type KubeadmBootstrapper struct {
c bootstrapper.CommandRunner
}
@ -54,20 +76,11 @@ func NewKubeadmBootstrapper(api libmachine.API) (*KubeadmBootstrapper, error) {
if err != nil {
return nil, errors.Wrap(err, "getting api client")
}
var cmd bootstrapper.CommandRunner
// The none driver executes commands directly on the host
if h.Driver.DriverName() == constants.DriverNone {
cmd = &bootstrapper.ExecRunner{}
} else {
client, err := sshutil.NewSSHClient(h.Driver)
if err != nil {
return nil, errors.Wrap(err, "getting ssh client")
}
cmd = bootstrapper.NewSSHRunner(client)
runner, err := machine.CommandRunner(h)
if err != nil {
return nil, errors.Wrap(err, "command runner")
}
return &KubeadmBootstrapper{
c: cmd,
}, nil
return &KubeadmBootstrapper{c: runner}, nil
}
func (k *KubeadmBootstrapper) GetKubeletStatus() (string, error) {
@ -137,21 +150,13 @@ func (k *KubeadmBootstrapper) StartCluster(k8s config.KubernetesConfig) error {
return errors.Wrap(err, "parsing kubernetes version")
}
b := bytes.Buffer{}
preflights := constants.Preflights
if k8s.ContainerRuntime != "" {
preflights = constants.AlternateRuntimePreflights
out, err := k.c.CombinedOutput("sudo modprobe br_netfilter")
if err != nil {
glog.Infoln(out)
return errors.Wrap(err, "sudo modprobe br_netfilter")
}
out, err = k.c.CombinedOutput("sudo sh -c \"echo '1' > /proc/sys/net/ipv4/ip_forward\"")
if err != nil {
glog.Infoln(out)
return errors.Wrap(err, "creating /proc/sys/net/ipv4/ip_forward")
}
r, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime})
if err != nil {
return err
}
b := bytes.Buffer{}
preflights := SkipPreflights
preflights = append(preflights, SkipAdditionalPreflights[r.Name()]...)
templateContext := struct {
KubeadmConfigFile string
@ -208,6 +213,7 @@ func addAddons(files *[]assets.CopyableFile) error {
return nil
}
// RestartCluster restarts the Kubernetes cluster configured by kubeadm
func (k *KubeadmBootstrapper) RestartCluster(k8s config.KubernetesConfig) error {
version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
@ -241,63 +247,23 @@ func (k *KubeadmBootstrapper) RestartCluster(k8s config.KubernetesConfig) error
return nil
}
// PullImages downloads images that will be used by RestartCluster
func (k *KubeadmBootstrapper) PullImages(k8s config.KubernetesConfig) error {
cmd := fmt.Sprintf("sudo kubeadm config images pull --config %s", constants.KubeadmConfigFile)
if err := k.c.Run(cmd); err != nil {
return errors.Wrapf(err, "running cmd: %s", cmd)
}
return nil
}
// SetupCerts sets up certificates within the cluster.
func (k *KubeadmBootstrapper) SetupCerts(k8s config.KubernetesConfig) error {
return bootstrapper.SetupCerts(k.c, k8s)
}
// SetContainerRuntime possibly sets the container runtime, if it hasn't already
// been specified by the extra-config option. It has a set of defaults known to
// work for a particular runtime.
func SetContainerRuntime(cfg map[string]string, runtime string) map[string]string {
if _, ok := cfg["container-runtime"]; ok {
glog.Infoln("Container runtime already set through extra options, ignoring --container-runtime flag.")
return cfg
}
if runtime == "" {
glog.Infoln("Container runtime flag provided with no value, using defaults.")
return cfg
}
switch runtime {
case "crio", "cri-o":
cfg["container-runtime"] = "remote"
cfg["container-runtime-endpoint"] = "/var/run/crio/crio.sock"
cfg["image-service-endpoint"] = "/var/run/crio/crio.sock"
cfg["runtime-request-timeout"] = "15m"
case "containerd":
cfg["container-runtime"] = "remote"
cfg["container-runtime-endpoint"] = "unix:///run/containerd/containerd.sock"
cfg["image-service-endpoint"] = "unix:///run/containerd/containerd.sock"
cfg["runtime-request-timeout"] = "15m"
default:
cfg["container-runtime"] = runtime
}
return cfg
}
func GetCRISocket(path string, runtime string) string {
if path != "" {
glog.Infoln("Container runtime interface socket provided, using path.")
return path
}
switch runtime {
case "crio", "cri-o":
path = "/var/run/crio/crio.sock"
case "containerd":
path = "/run/containerd/containerd.sock"
default:
path = ""
}
return path
}
// NewKubeletConfig generates a new systemd unit containing a configured kubelet
// based on the options present in the KubernetesConfig.
func NewKubeletConfig(k8s config.KubernetesConfig) (string, error) {
func NewKubeletConfig(k8s config.KubernetesConfig, r cruntime.Manager) (string, error) {
version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
return "", errors.Wrap(err, "parsing kubernetes version")
@ -308,8 +274,9 @@ func NewKubeletConfig(k8s config.KubernetesConfig) (string, error) {
return "", errors.Wrap(err, "generating extra configuration for kubelet")
}
extraOpts = SetContainerRuntime(extraOpts, k8s.ContainerRuntime)
for k, v := range r.KubeletOptions() {
extraOpts[k] = v
}
if k8s.NetworkPlugin != "" {
extraOpts["network-plugin"] = k8s.NetworkPlugin
}
@ -346,16 +313,20 @@ func (k *KubeadmBootstrapper) UpdateCluster(cfg config.KubernetesConfig) error {
return errors.Wrap(err, "loading cached images")
}
}
kubeadmCfg, err := generateConfig(cfg)
r, err := cruntime.New(cruntime.Config{Type: cfg.ContainerRuntime, Socket: cfg.CRISocket})
if err != nil {
return errors.Wrap(err, "runtime")
}
kubeadmCfg, err := generateConfig(cfg, r)
if err != nil {
return errors.Wrap(err, "generating kubeadm cfg")
}
kubeletCfg, err := NewKubeletConfig(cfg)
kubeletCfg, err := NewKubeletConfig(cfg, r)
if err != nil {
return errors.Wrap(err, "generating kubelet config")
}
glog.Infof("kubelet %s config:\n%s", cfg.KubernetesVersion, kubeletCfg)
files := []assets.CopyableFile{
assets.NewMemoryAssetTarget([]byte(kubeletService), constants.KubeletServiceFile, "0640"),
@ -416,14 +387,12 @@ sudo systemctl start kubelet
return nil
}
func generateConfig(k8s config.KubernetesConfig) (string, error) {
func generateConfig(k8s config.KubernetesConfig, r cruntime.Manager) (string, error) {
version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
return "", errors.Wrap(err, "parsing kubernetes version")
}
criSocket := GetCRISocket(k8s.CRISocket, k8s.ContainerRuntime)
// parses a map of the feature gates for kubeadm and component
kubeadmFeatureArgs, componentFeatureArgs, err := ParseFeatureArgs(k8s.FeatureGates)
if err != nil {
@ -462,7 +431,7 @@ func generateConfig(k8s config.KubernetesConfig) (string, error) {
KubernetesVersion: k8s.KubernetesVersion,
EtcdDataDir: "/data/minikube", //TODO(r2d4): change to something else persisted
NodeName: k8s.NodeName,
CRISocket: criSocket,
CRISocket: r.SocketPath(),
ExtraArgs: extraComponentConfig,
FeatureArgs: kubeadmFeatureArgs,
NoTaintMaster: false, // That does not work with k8s 1.12+
@ -513,11 +482,10 @@ func maybeDownloadAndCache(binary, version string) (string, error) {
options.Checksum = constants.GetKubernetesReleaseURLSha1(binary, version)
options.ChecksumHash = crypto.SHA1
fmt.Printf("Downloading %s %s\n", binary, version)
glog.Infof("Downloading %s %s", binary, version)
if err := download.ToFile(url, targetFilepath, options); err != nil {
return "", errors.Wrapf(err, "Error downloading %s %s", binary, version)
}
fmt.Printf("Finished Downloading %s %s\n", binary, version)
glog.Infof("Finished Downloading %s %s", binary, version)
return targetFilepath, nil
}

View File

@ -20,6 +20,7 @@ import (
"testing"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/util"
)
@@ -257,8 +258,13 @@ apiServerExtraArgs:
}
for _, test := range tests {
runtime, err := cruntime.New(cruntime.Config{Type: "docker"})
if err != nil {
t.Fatalf("runtime: %v", err)
}
t.Run(test.description, func(t *testing.T) {
actualCfg, err := generateConfig(test.cfg)
actualCfg, err := generateConfig(test.cfg, runtime)
if err != nil && !test.shouldErr {
t.Errorf("got unexpected error generating config: %v", err)
return

View File

@@ -155,41 +155,6 @@ const (
DefaultRktNetConfigPath = "/etc/rkt/net.d/k8s.conf"
)
var Preflights = []string{
// We use --ignore-preflight-errors=DirAvailable since we have our own custom addons
// that we also stick in /etc/kubernetes/manifests
"DirAvailable--etc-kubernetes-manifests",
"DirAvailable--data-minikube",
"Port-10250",
"FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml",
"FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml",
"FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml",
"FileAvailable--etc-kubernetes-manifests-etcd.yaml",
// We use --ignore-preflight-errors=Swap since minikube.iso allocates a swap partition.
// (it should probably stop doing this, though...)
"Swap",
// We use --ignore-preflight-errors=CRI since /var/run/dockershim.sock is not present.
// (because we start kubelet with an invalid config)
"CRI",
}
// AlternateRuntimePreflights are additional preflight checks that are skipped when running
// any container runtime that isn't Docker
var AlternateRuntimePreflights = append(Preflights, []string{
"Service-Docker",
"Port-8443",
"Port-10251",
"Port-10252",
"Port-2379",
}...)
const (
ContainerdRuntime = "containerd"
RktRuntime = "rkt"
CrioRuntime = "crio"
Cri_oRuntime = "cri-o"
)
const (
DefaultUfsPort = "5640"
DefaultUfsDebugLvl = 0
@@ -286,7 +251,7 @@ const (
GvisorFilesPath = "/tmp/gvisor"
// ContainerdConfigTomlPath is the path to the containerd config.toml
ContainerdConfigTomlPath = "/etc/containerd/config.toml"
// GvisorContainerdShimTomlPath is the path to givosr-containerd-shim.toml
// GvisorContainerdShimTomlPath is the path to gvisor-containerd-shim.toml
GvisorContainerdShimTomlPath = "/etc/containerd/gvisor-containerd-shim.toml"
// StoredContainerdConfigTomlPath is the path where the default config.toml will be stored
StoredContainerdConfigTomlPath = "/tmp/config.toml"

View File

@@ -32,17 +32,16 @@ func init() {
Builtin: true,
ConfigCreator: createNoneHost,
DriverCreator: func() drivers.Driver {
return none.NewDriver("", "")
return none.NewDriver(none.Config{})
},
})
}
// createNoneHost creates a none Driver from a MachineConfig
func createNoneHost(config cfg.MachineConfig) interface{} {
return &none.Driver{
BaseDriver: &drivers.BaseDriver{
MachineName: cfg.GetMachineName(),
StorePath: constants.GetMinipath(),
},
return none.NewDriver(none.Config{
MachineName: cfg.GetMachineName(),
StorePath: constants.GetMinipath(),
ContainerRuntime: config.ContainerRuntime,
}
})
}

View File

@@ -26,30 +26,27 @@ import (
"strings"
"sync"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/golang/glog"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/sshutil"
"github.com/golang/glog"
"github.com/pkg/errors"
)
const tempLoadDir = "/tmp"
var getWindowsVolumeName = getWindowsVolumeNameCmd
var podmanLoad sync.Mutex
// loadImageLock is used to serialize image loads to avoid overloading the guest VM
var loadImageLock sync.Mutex
func CacheImagesForBootstrapper(version string, clusterBootstrapper string) error {
images := bootstrapper.GetCachedImageList(version, clusterBootstrapper)
@@ -198,7 +195,7 @@ func getWindowsVolumeNameCmd(d string) (string, error) {
return vname, nil
}
func LoadFromCacheBlocking(cmd bootstrapper.CommandRunner, k8s config.KubernetesConfig, src string) error {
func LoadFromCacheBlocking(cr bootstrapper.CommandRunner, k8s config.KubernetesConfig, src string) error {
glog.Infoln("Loading image from cache at ", src)
filename := filepath.Base(src)
for {
@@ -211,34 +208,26 @@ func LoadFromCacheBlocking(cmd bootstrapper.CommandRunner, k8s config.Kubernetes
if err != nil {
return errors.Wrapf(err, "creating copyable file asset: %s", filename)
}
if err := cmd.Copy(f); err != nil {
if err := cr.Copy(f); err != nil {
return errors.Wrap(err, "transferring cached image")
}
var dockerLoadCmd string
crio := k8s.ContainerRuntime == constants.CrioRuntime || k8s.ContainerRuntime == constants.Cri_oRuntime
if crio {
dockerLoadCmd = "sudo podman load -i " + dst
} else {
dockerLoadCmd = "docker load -i " + dst
r, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: cr})
if err != nil {
return errors.Wrap(err, "runtime")
}
loadImageLock.Lock()
defer loadImageLock.Unlock()
if crio {
podmanLoad.Lock()
err = r.LoadImage(dst)
if err != nil {
return errors.Wrapf(err, "%s load %s", r.Name(), dst)
}
loadImageLock.Unlock()
if err := cmd.Run(dockerLoadCmd); err != nil {
return errors.Wrapf(err, "loading docker image: %s", dst)
}
if crio {
podmanLoad.Unlock()
}
if err := cmd.Run("sudo rm -rf " + dst); err != nil {
if err := cr.Run("sudo rm -rf " + dst); err != nil {
return errors.Wrap(err, "deleting temp docker image location")
}
glog.Infof("Successfully loaded image %s from cache", src)
return nil
}

View File

@@ -25,12 +25,6 @@ import (
"path/filepath"
"time"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/registry"
"k8s.io/minikube/pkg/minikube/sshutil"
"k8s.io/minikube/pkg/provision"
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/auth"
"github.com/docker/machine/libmachine/cert"
@@ -48,6 +42,11 @@ import (
"github.com/docker/machine/libmachine/version"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/registry"
"k8s.io/minikube/pkg/minikube/sshutil"
"k8s.io/minikube/pkg/provision"
)
func NewRPCClient(storePath, certsDir string) libmachine.API {
@@ -142,16 +141,16 @@ func (api *LocalClient) Close() error {
return nil
}
func GetCommandRunner(h *host.Host) (bootstrapper.CommandRunner, error) {
if h.DriverName != constants.DriverNone {
client, err := sshutil.NewSSHClient(h.Driver)
if err != nil {
return nil, errors.Wrap(err, "getting ssh client for bootstrapper")
}
return bootstrapper.NewSSHRunner(client), nil
// CommandRunner returns best available command runner for this host
func CommandRunner(h *host.Host) (bootstrapper.CommandRunner, error) {
if h.DriverName == constants.DriverNone {
return &bootstrapper.ExecRunner{}, nil
}
return &bootstrapper.ExecRunner{}, nil
client, err := sshutil.NewSSHClient(h.Driver)
if err != nil {
return nil, errors.Wrap(err, "getting ssh client for bootstrapper")
}
return bootstrapper.NewSSHRunner(client), nil
}
func (api *LocalClient) Create(h *host.Host) error {

View File

@@ -23,7 +23,6 @@ import (
"testing"
"github.com/docker/machine/libmachine/state"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/test/integration/util"
)
@@ -61,7 +60,7 @@ func TestFunctionalContainerd(t *testing.T) {
minikubeRunner.RunCommand("delete", true)
}
minikubeRunner.SetRuntime(constants.ContainerdRuntime)
minikubeRunner.SetRuntime("containerd")
minikubeRunner.EnsureRunning()
t.Run("Gvisor", testGvisor)

View File

@@ -25,32 +25,20 @@ import (
"time"
"github.com/docker/machine/libmachine/state"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/test/integration/util"
)
func TestStartStop(t *testing.T) {
tests := []struct {
name string
runtime string
}{
{
name: "default",
runtime: "",
},
{
name: "containerd",
runtime: constants.ContainerdRuntime,
},
{
name: "crio",
runtime: constants.CrioRuntime,
},
// TODO(tstromberg): Add test for crio w/o cni
{runtime: "docker"},
{runtime: "containerd"},
{runtime: "crio"},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
t.Run(test.runtime, func(t *testing.T) {
runner := NewMinikubeRunner(t)
if test.runtime != "" && usingNoneDriver(runner) {
t.Skipf("skipping, can't use %s with none driver", test.runtime)

View File

@@ -34,7 +34,6 @@ import (
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/constants"
commonutil "k8s.io/minikube/pkg/util"
)
@@ -207,9 +206,9 @@ func (m *MinikubeRunner) Start() {
opts := ""
// TODO(tstromberg): Deprecate this in favor of making it possible for tests to define explicit flags.
switch r := m.Runtime; r {
case constants.ContainerdRuntime:
case "containerd":
opts = "--container-runtime=containerd --network-plugin=cni --enable-default-cni --docker-opt containerd=/var/run/containerd/containerd.sock"
case constants.CrioRuntime:
case "crio":
opts = "--container-runtime=crio --network-plugin=cni --enable-default-cni"
}
m.RunCommand(fmt.Sprintf("start %s %s %s --alsologtostderr --v=5", m.StartArgs, m.Args, opts), true)