The podman driver should not be run as root

Use sudo for the podman commands instead

Wrap the docker commands with an env prefix
pull/7631/head
Anders F Björklund 2020-04-12 16:12:30 +02:00
parent f66ebabd54
commit d7dc7bf7b2
17 changed files with 99 additions and 89 deletions
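
The same mechanical pattern repeats across all 17 files: every `exec.Command(ociBin, args...)` call site gains a prefix binary as argv[0] — `sudo` for podman (which needs root) and `env` for docker (a no-op wrapper that keeps the argument layout identical for both tools). A minimal sketch of that convention; `prefixFor` is a hypothetical helper for illustration only, the commit itself threads the prefix explicitly through configs and function parameters:

```go
package main

import (
	"fmt"
	"os/exec"
)

// prefixFor is a hypothetical helper showing the convention: podman runs
// under sudo, docker under env, so both code paths build argv identically.
func prefixFor(ociBin string) string {
	if ociBin == "podman" {
		return "sudo"
	}
	// env with no VAR=value arguments simply execs the command unchanged.
	return "env"
}

func main() {
	for _, bin := range []string{"docker", "podman"} {
		cmd := exec.Command(prefixFor(bin), bin, "ps", "-a", "--format", "{{.Names}}")
		out, err := cmd.CombinedOutput()
		fmt.Printf("%v -> err=%v\n%s", cmd.Args, err, out)
	}
}
```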

View File

@ -90,17 +90,17 @@ func init() {
func deleteContainersAndVolumes() {
delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
errs := oci.DeleteContainersByLabel("env", oci.Docker, delLabel)
if len(errs) > 0 { // it will error if there is no container to delete
glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, errs)
}
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
errs = oci.DeleteAllVolumesByLabel("env", oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
}
errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
errs = oci.PruneAllVolumesByLabel("env", oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
}
@ -191,16 +191,16 @@ func DeleteProfiles(profiles []*config.Profile) []error {
func deleteProfileContainersAndVolumes(name string) {
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, name)
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
errs := oci.DeleteContainersByLabel("env", oci.Docker, delLabel)
if errs != nil { // it will error if there is no container to delete
glog.Infof("error deleting containers for %s (might be okay):\n%v", name, errs)
}
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
errs = oci.DeleteAllVolumesByLabel("env", oci.Docker, delLabel)
if errs != nil { // it will not error if there is nothing to delete
glog.Warningf("error deleting volumes (might be okay).\nTo see the list of volumes run: 'docker volume ls'\n:%v", errs)
}
errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
errs = oci.PruneAllVolumesByLabel("env", oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volume (might be okay):\n%v", errs)
}

View File

@ -31,7 +31,6 @@ JOB_NAME="Podman_Linux"
mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
SUDO_PREFIX="sudo -E "
EXTRA_ARGS="--container-runtime=containerd"

View File

@ -44,6 +44,7 @@ func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string
driver := kic.NewDriver(kic.Config{
KubernetesVersion: kubernetesVersion,
ContainerRuntime: driver.Docker,
OCIPrefix: "env",
OCIBinary: oci.Docker,
MachineName: profile,
ImageDigest: kic.BaseImage,

View File

@ -48,6 +48,7 @@ type Driver struct {
URL string
exec command.Runner
NodeConfig Config
OCIPrefix string // env, sudo
OCIBinary string // docker, podman
}
@ -58,8 +59,9 @@ func NewDriver(c Config) *Driver {
MachineName: c.MachineName,
StorePath: c.StorePath,
},
exec: command.NewKICRunner(c.MachineName, c.OCIBinary),
exec: command.NewKICRunner(c.MachineName, c.OCIPrefix, c.OCIBinary),
NodeConfig: c,
OCIPrefix: c.OCIPrefix,
OCIBinary: c.OCIBinary,
}
return d
@ -76,6 +78,7 @@ func (d *Driver) Create() error {
Memory: strconv.Itoa(d.NodeConfig.Memory) + "mb",
Envs: d.NodeConfig.Envs,
ExtraArgs: []string{"--expose", fmt.Sprintf("%d", d.NodeConfig.APIServerPort)},
OCIPrefix: d.NodeConfig.OCIPrefix,
OCIBinary: d.NodeConfig.OCIBinary,
APIServerPort: d.NodeConfig.APIServerPort,
}
@ -99,15 +102,15 @@ func (d *Driver) Create() error {
},
)
exists, err := oci.ContainerExists(d.OCIBinary, params.Name)
exists, err := oci.ContainerExists(d.OCIPrefix, d.OCIBinary, params.Name)
if err != nil {
glog.Warningf("failed to check if container already exists: %v", err)
}
if exists {
// if container was created by minikube it is safe to delete and recreate it.
if oci.IsCreatedByMinikube(d.OCIBinary, params.Name) {
if oci.IsCreatedByMinikube(d.OCIPrefix, d.OCIBinary, params.Name) {
glog.Info("Found already existing abandoned minikube container, will try to delete.")
if err := oci.DeleteContainer(d.OCIBinary, params.Name); err != nil {
if err := oci.DeleteContainer(d.OCIPrefix, d.OCIBinary, params.Name); err != nil {
glog.Errorf("Failed to delete a conflicting minikube container %s. You might need to restart your %s daemon and delete it manually and try again: %v", params.Name, params.OCIBinary, err)
}
} else {
@ -159,7 +162,7 @@ func (d *Driver) prepareSSH() error {
return errors.Wrap(err, "generate ssh key")
}
cmder := command.NewKICRunner(d.NodeConfig.MachineName, d.NodeConfig.OCIBinary)
cmder := command.NewKICRunner(d.NodeConfig.MachineName, d.NodeConfig.OCIPrefix, d.NodeConfig.OCIBinary)
f, err := assets.NewFileAsset(d.GetSSHKeyPath()+".pub", "/home/docker/.ssh/", "authorized_keys", "0644")
if err != nil {
return errors.Wrap(err, "create pubkey assetfile ")
@ -234,7 +237,7 @@ func (d *Driver) GetURL() (string, error) {
// GetState returns the state that the host is in (running, stopped, etc)
func (d *Driver) GetState() (state.State, error) {
out, err := oci.WarnIfSlow(d.NodeConfig.OCIBinary, "inspect", "-f", "{{.State.Status}}", d.MachineName)
out, err := oci.WarnIfSlow(d.NodeConfig.OCIPrefix, d.NodeConfig.OCIBinary, "inspect", "-f", "{{.State.Status}}", d.MachineName)
if err != nil {
return state.Error, err
}
@ -259,17 +262,17 @@ func (d *Driver) GetState() (state.State, error) {
// Kill stops a host forcefully, including any containers that we are managing.
func (d *Driver) Kill() error {
// on init this doesn't get filled when called from cmd
d.exec = command.NewKICRunner(d.MachineName, d.OCIBinary)
d.exec = command.NewKICRunner(d.MachineName, d.OCIPrefix, d.OCIBinary)
if err := sysinit.New(d.exec).ForceStop("kubelet"); err != nil {
glog.Warningf("couldn't force stop kubelet. will continue with kill anyways: %v", err)
}
if err := oci.ShutDown(d.OCIBinary, d.MachineName); err != nil {
if err := oci.ShutDown(d.OCIPrefix, d.OCIBinary, d.MachineName); err != nil {
glog.Warningf("couldn't shutdown the container, will continue with kill anyways: %v", err)
}
cr := command.NewExecRunner() // using exec runner for interacting with the daemon.
if _, err := cr.RunCmd(exec.Command(d.NodeConfig.OCIBinary, "kill", d.MachineName)); err != nil {
if _, err := cr.RunCmd(exec.Command(d.NodeConfig.OCIPrefix, d.NodeConfig.OCIBinary, "kill", d.MachineName)); err != nil {
return errors.Wrapf(err, "killing %q", d.MachineName)
}
return nil
@ -277,11 +280,11 @@ func (d *Driver) Kill() error {
// Remove will delete the Kic Node Container
func (d *Driver) Remove() error {
if _, err := oci.ContainerID(d.OCIBinary, d.MachineName); err != nil {
if _, err := oci.ContainerID(d.OCIPrefix, d.OCIBinary, d.MachineName); err != nil {
glog.Infof("could not find the container %s to remove it. will try anyways", d.MachineName)
}
if err := oci.DeleteContainer(d.NodeConfig.OCIBinary, d.MachineName); err != nil {
if err := oci.DeleteContainer(d.NodeConfig.OCIPrefix, d.NodeConfig.OCIBinary, d.MachineName); err != nil {
if strings.Contains(err.Error(), "is already in progress") {
return errors.Wrap(err, "stuck delete")
}
@ -292,7 +295,7 @@ func (d *Driver) Remove() error {
}
// check that no container is left after delete
if id, err := oci.ContainerID(d.OCIBinary, d.MachineName); err == nil && id != "" {
if id, err := oci.ContainerID(d.OCIPrefix, d.OCIBinary, d.MachineName); err == nil && id != "" {
return fmt.Errorf("expected no container ID be found for %q after delete. but got %q", d.MachineName, id)
}
return nil
@ -320,11 +323,11 @@ func (d *Driver) Restart() error {
// Start an already created kic container
func (d *Driver) Start() error {
cr := command.NewExecRunner() // using exec runner for interacting with docker/podman daemon
if _, err := cr.RunCmd(exec.Command(d.NodeConfig.OCIBinary, "start", d.MachineName)); err != nil {
if _, err := cr.RunCmd(exec.Command(d.NodeConfig.OCIPrefix, d.NodeConfig.OCIBinary, "start", d.MachineName)); err != nil {
return errors.Wrap(err, "start")
}
checkRunning := func() error {
s, err := oci.ContainerStatus(d.NodeConfig.OCIBinary, d.MachineName)
s, err := oci.ContainerStatus(d.NodeConfig.OCIPrefix, d.NodeConfig.OCIBinary, d.MachineName)
if err != nil {
return err
}
@ -344,7 +347,7 @@ func (d *Driver) Start() error {
// Stop a host gracefully, including any containers that we are managing.
func (d *Driver) Stop() error {
// on init this doesn't get filled when called from cmd
d.exec = command.NewKICRunner(d.MachineName, d.OCIBinary)
d.exec = command.NewKICRunner(d.MachineName, d.OCIPrefix, d.OCIBinary)
// docker does not send the right signal for systemd to know to stop,
// to avoid the bind address being taken on an upgrade. more info https://github.com/kubernetes/minikube/issues/7171
if err := sysinit.New(d.exec).Stop("kubelet"); err != nil {
@ -379,7 +382,7 @@ func (d *Driver) Stop() error {
glog.Warningf("couldn't stop kube-apiserver proc: %v", err)
}
cmd := exec.Command(d.NodeConfig.OCIBinary, "stop", d.MachineName)
cmd := exec.Command(d.NodeConfig.OCIPrefix, d.NodeConfig.OCIBinary, "stop", d.MachineName)
if err := cmd.Run(); err != nil {
return errors.Wrapf(err, "stopping %s", d.MachineName)
}
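
With `OCIPrefix` threaded through `NodeConfig`, every lifecycle method builds its command line the same way. A sketch of what the `Stop` path above executes for the podman driver, assuming a machine named `minikube`:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// With OCIPrefix = "sudo" and OCIBinary = "podman", the Stop hunk
	// above builds the literal command line: sudo podman stop minikube
	prefix, ociBin, machineName := "sudo", "podman", "minikube"
	cmd := exec.Command(prefix, ociBin, "stop", machineName)
	if err := cmd.Run(); err != nil {
		fmt.Printf("stopping %s: %v\n", machineName, err)
	}
}
```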

View File

@ -234,7 +234,7 @@ func dockerSystemInfo() (dockerSysInfo, error) {
// podmanSysInfo returns podman system info --format '{{json .}}'
func podmanSystemInfo() (podmanSysInfo, error) {
var ps podmanSysInfo
cmd := exec.Command(Podman, "system", "info", "--format", "'{{json .}}'")
cmd := exec.Command("sudo", Podman, "system", "info", "--format", "'{{json .}}'")
out, err := cmd.CombinedOutput()
if err != nil {
return ps, errors.Wrap(err, "get podman system info")
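
A sketch of consuming that output in isolation; the struct here is a trimmed, illustrative stand-in for `podmanSysInfo` (the field names are assumptions, not the full type from this file), and it passes the Go template without the extra single quotes so the output parses as plain JSON:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// sysInfo is a trimmed, illustrative stand-in for podmanSysInfo; only the
// fields sketched here are assumed, not the full type from this file.
type sysInfo struct {
	Host struct {
		Os       string `json:"os"`
		Hostname string `json:"hostname"`
	} `json:"host"`
}

func main() {
	// Same command as above, minus the literal single quotes around the
	// template, so the output is plain JSON rather than quoted JSON.
	out, err := exec.Command("sudo", "podman", "system", "info", "--format", "{{json .}}").Output()
	if err != nil {
		fmt.Println("get podman system info:", err)
		return
	}
	var si sysInfo
	if err := json.Unmarshal(out, &si); err != nil {
		fmt.Println("decode system info:", err)
		return
	}
	fmt.Printf("podman host: %s (%s)\n", si.Host.Hostname, si.Host.Os)
}
```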

View File

@ -66,7 +66,7 @@ func dockerGatewayIP() (net.IP, error) {
}
bridgeID := strings.TrimSpace(string(out))
cmd = exec.Command(Docker, "inspect",
cmd = exec.Command("env", Docker, "inspect",
"--format", "{{(index .IPAM.Config 0).Gateway}}", bridgeID)
out, err = cmd.CombinedOutput()
@ -90,13 +90,13 @@ func ForwardedPort(ociBinary string, ociID string, contPort int) (int, error) {
if ociBinary == Podman {
//podman inspect -f "{{range .NetworkSettings.Ports}}{{if eq .ContainerPort "80"}}{{.HostPort}}{{end}}{{end}}"
cmd := exec.Command(ociBinary, "inspect", "-f", fmt.Sprintf("{{range .NetworkSettings.Ports}}{{if eq .ContainerPort %s}}{{.HostPort}}{{end}}{{end}}", fmt.Sprint(contPort)), ociID)
cmd := exec.Command("sudo", ociBinary, "inspect", "-f", fmt.Sprintf("{{range .NetworkSettings.Ports}}{{if eq .ContainerPort %s}}{{.HostPort}}{{end}}{{end}}", fmt.Sprint(contPort)), ociID)
out, err = cmd.CombinedOutput()
if err != nil {
return 0, errors.Wrapf(err, "get host-bind port %d for %q, output %s", contPort, ociID, out)
}
} else {
cmd := exec.Command(ociBinary, "inspect", "-f", fmt.Sprintf("'{{(index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort}}'", contPort), ociID)
cmd := exec.Command("env", ociBinary, "inspect", "-f", fmt.Sprintf("'{{(index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort}}'", contPort), ociID)
out, err = cmd.CombinedOutput()
if err != nil {
return 0, errors.Wrapf(err, "get host-bind port %d for %q, output %s", contPort, ociID, out)
@ -141,7 +141,7 @@ func podmanConttainerIP(name string) (string, string, error) {
// dockerContainerIP returns ipv4, ipv6 of container or error
func dockerContainerIP(name string) (string, string, error) {
// retrieve the IP address of the node using docker inspect
lines, err := inspect(Docker, name, "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}")
lines, err := inspect("env", Docker, name, "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}")
if err != nil {
return "", "", errors.Wrap(err, "inspecting NetworkSettings.Networks")
}
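
The `ForwardedPort` hunk above keeps two different inspect templates because podman exposes ports as a list of structs while docker exposes them as a map keyed by `"<port>/tcp"`. A sketch of both invocations side by side, with their new prefixes:

```go
package main

import (
	"fmt"
	"os/exec"
)

// hostPortCmd mirrors the two branches above: podman walks a list of port
// structs, docker indexes a map keyed by "<port>/tcp".
func hostPortCmd(ociBin, ociID string, contPort int) *exec.Cmd {
	if ociBin == "podman" {
		f := fmt.Sprintf("{{range .NetworkSettings.Ports}}{{if eq .ContainerPort %s}}{{.HostPort}}{{end}}{{end}}", fmt.Sprint(contPort))
		return exec.Command("sudo", ociBin, "inspect", "-f", f, ociID)
	}
	f := fmt.Sprintf("{{(index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort}}", contPort)
	return exec.Command("env", ociBin, "inspect", "-f", f, ociID)
}

func main() {
	out, err := hostPortCmd("podman", "minikube", 22).CombinedOutput()
	fmt.Printf("host port: %s err=%v\n", out, err)
}
```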

View File

@ -40,10 +40,10 @@ import (
// DeleteContainersByLabel deletes all containers that have a specific label
// if there are no containers found with the given label, it will return nil
func DeleteContainersByLabel(ociBin string, label string) []error {
func DeleteContainersByLabel(prefix string, ociBin string, label string) []error {
var deleteErrs []error
cs, err := listContainersByLabel(ociBin, label)
cs, err := listContainersByLabel(prefix, ociBin, label)
if err != nil {
return []error{fmt.Errorf("listing containers by label %q", label)}
}
@ -53,7 +53,7 @@ func DeleteContainersByLabel(ociBin string, label string) []error {
}
for _, c := range cs {
_, err := ContainerStatus(ociBin, c)
_, err := ContainerStatus(prefix, ociBin, c)
// only try to delete if docker/podman inspect returns;
// if it doesn't, it means the docker daemon is stuck and needs a restart
if err != nil {
@ -61,10 +61,10 @@ func DeleteContainersByLabel(ociBin string, label string) []error {
glog.Errorf("%s daemon seems to be stuck. Please try restarting your %s. :%v", ociBin, ociBin, err)
continue
}
if err := ShutDown(ociBin, c); err != nil {
if err := ShutDown(prefix, ociBin, c); err != nil {
glog.Infof("couldn't shut down %s (might be okay): %v ", c, err)
}
cmd := exec.Command(ociBin, "rm", "-f", "-v", c)
cmd := exec.Command(prefix, ociBin, "rm", "-f", "-v", c)
if out, err := cmd.CombinedOutput(); err != nil {
deleteErrs = append(deleteErrs, errors.Wrapf(err, "delete container %s: output %s", c, out))
}
@ -74,17 +74,17 @@ func DeleteContainersByLabel(ociBin string, label string) []error {
}
// DeleteContainer deletes a container by ID or Name
func DeleteContainer(ociBin string, name string) error {
func DeleteContainer(prefix string, ociBin string, name string) error {
_, err := ContainerStatus(ociBin, name)
_, err := ContainerStatus(prefix, ociBin, name)
if err != nil {
glog.Errorf("%s daemon seems to be stuck. Please try restarting your %s. Will try to delete anyways: %v", ociBin, ociBin, err)
}
// try to delete anyways
if err := ShutDown(ociBin, name); err != nil {
if err := ShutDown(prefix, ociBin, name); err != nil {
glog.Infof("couldn't shut down %s (might be okay): %v ", name, err)
}
cmd := exec.Command(ociBin, "rm", "-f", "-v", name)
cmd := exec.Command(prefix, ociBin, "rm", "-f", "-v", name)
if out, err := cmd.CombinedOutput(); err != nil {
return errors.Wrapf(err, "delete container %s: output %s", name, out)
}
@ -157,18 +157,18 @@ func CreateContainerNode(p CreateParams) error {
// adds node specific args
runArgs = append(runArgs, p.ExtraArgs...)
if enabled := isUsernsRemapEnabled(p.OCIBinary); enabled {
if enabled := isUsernsRemapEnabled(p.OCIPrefix, p.OCIBinary); enabled {
// We need this argument in order to make this command work
// in systems that have userns-remap enabled on the docker daemon
runArgs = append(runArgs, "--userns=host")
}
if err := createContainer(p.OCIBinary, p.Image, withRunArgs(runArgs...), withMounts(p.Mounts), withPortMappings(p.PortMappings)); err != nil {
if err := createContainer(p.OCIPrefix, p.OCIBinary, p.Image, withRunArgs(runArgs...), withMounts(p.Mounts), withPortMappings(p.PortMappings)); err != nil {
return errors.Wrap(err, "create container")
}
checkRunning := func() error {
s, err := ContainerStatus(p.OCIBinary, p.Name)
s, err := ContainerStatus(p.OCIPrefix, p.OCIBinary, p.Name)
if err != nil {
return fmt.Errorf("temporary error checking status for %q : %v", p.Name, err)
}
@ -188,7 +188,7 @@ func CreateContainerNode(p CreateParams) error {
}
// createContainer creates a container with "docker/podman run"
func createContainer(ociBinary string, image string, opts ...createOpt) error {
func createContainer(prefix string, ociBin string, image string, opts ...createOpt) error {
o := &createOpts{}
for _, opt := range opts {
o = opt(o)
@ -202,10 +202,10 @@ func createContainer(ociBinary string, image string, opts ...createOpt) error {
runArgs = append(runArgs, generatePortMappings(portMapping)...)
}
// construct the actual docker run argv
args := []string{"run"}
args := []string{ociBin, "run"}
// to run nested container from privileged container in podman https://bugzilla.redhat.com/show_bug.cgi?id=1687713
if ociBinary == Podman {
if ociBin == Podman {
args = append(args, "--cgroup-manager", "cgroupfs")
}
@ -213,7 +213,7 @@ func createContainer(ociBinary string, image string, opts ...createOpt) error {
args = append(args, image)
args = append(args, o.ContainerArgs...)
out, err := exec.Command(ociBinary, args...).CombinedOutput()
out, err := exec.Command(prefix, args...).CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed args: %v output: %s", args, out)
}
@ -222,13 +222,13 @@ func createContainer(ociBinary string, image string, opts ...createOpt) error {
}
// Copy copies a local asset into the container
func Copy(ociBinary string, ociID string, targetDir string, fName string) error {
func Copy(prefix string, ociBin string, ociID string, targetDir string, fName string) error {
if _, err := os.Stat(fName); os.IsNotExist(err) {
return errors.Wrapf(err, "error source %s does not exist", fName)
}
destination := fmt.Sprintf("%s:%s", ociID, targetDir)
cmd := exec.Command(ociBinary, "cp", fName, destination)
cmd := exec.Command(prefix, ociBin, "cp", fName, destination)
if err := cmd.Run(); err != nil {
return errors.Wrapf(err, "error copying %s into node", fName)
}
@ -237,8 +237,8 @@ func Copy(ociBinary string, ociID string, targetDir string, fName string) error
}
// ContainerID returns the ID of a container, given its name or ID
func ContainerID(ociBinary string, nameOrID string) (string, error) {
cmd := exec.Command(ociBinary, "inspect", "-f", "{{.Id}}", nameOrID)
func ContainerID(prefix string, ociBin string, nameOrID string) (string, error) {
cmd := exec.Command(prefix, ociBin, "inspect", "-f", "{{.Id}}", nameOrID)
out, err := cmd.CombinedOutput()
if err != nil { // don't return error if not found, only return empty string
@ -287,8 +287,8 @@ func WarnIfSlow(args ...string) ([]byte, error) {
}
// ContainerExists checks if container name exists (either running or exited)
func ContainerExists(ociBin string, name string) (bool, error) {
out, err := WarnIfSlow(ociBin, "ps", "-a", "--format", "{{.Names}}")
func ContainerExists(prefix string, ociBin string, name string) (bool, error) {
out, err := WarnIfSlow(prefix, ociBin, "ps", "-a", "--format", "{{.Names}}")
if err != nil {
return false, errors.Wrapf(err, string(out))
}
@ -305,8 +305,8 @@ func ContainerExists(ociBin string, name string) (bool, error) {
// IsCreatedByMinikube returns true if the container was created by minikube,
// defaulting to the assumption that it was not when we don't know for sure
func IsCreatedByMinikube(ociBinary string, nameOrID string) bool {
cmd := exec.Command(ociBinary, "inspect", nameOrID, "--format", "{{.Config.Labels}}")
func IsCreatedByMinikube(prefix string, ociBin string, nameOrID string) bool {
cmd := exec.Command(prefix, ociBin, "inspect", nameOrID, "--format", "{{.Config.Labels}}")
out, err := cmd.CombinedOutput()
if err != nil {
@ -321,14 +321,14 @@ func IsCreatedByMinikube(ociBinary string, nameOrID string) bool {
}
// ListOwnedContainers lists all the containers that the kic driver created on the user's machine, using a label
func ListOwnedContainers(ociBinary string) ([]string, error) {
return listContainersByLabel(ociBinary, ProfileLabelKey)
func ListOwnedContainers(prefix string, ociBin string) ([]string, error) {
return listContainersByLabel(prefix, ociBin, ProfileLabelKey)
}
// inspect returns low-level information on containers
func inspect(ociBinary string, containerNameOrID, format string) ([]string, error) {
func inspect(prefix string, ociBin string, containerNameOrID, format string) ([]string, error) {
cmd := exec.Command(ociBinary, "inspect",
cmd := exec.Command(prefix, ociBin, "inspect",
"-f", format,
containerNameOrID) // ... against the "node" container
var buff bytes.Buffer
@ -390,8 +390,8 @@ func generateMountBindings(mounts ...Mount) []string {
}
// isUsernsRemapEnabled checks if userns-remap is enabled in docker
func isUsernsRemapEnabled(ociBinary string) bool {
cmd := exec.Command(ociBinary, "info", "--format", "'{{json .SecurityOptions}}'")
func isUsernsRemapEnabled(prefix string, ociBin string) bool {
cmd := exec.Command(prefix, ociBin, "info", "--format", "'{{json .SecurityOptions}}'")
var buff bytes.Buffer
cmd.Stdout = &buff
cmd.Stderr = &buff
@ -453,8 +453,8 @@ func withPortMappings(portMappings []PortMapping) createOpt {
}
// listContainersByLabel returns all the container names with a specified label
func listContainersByLabel(ociBinary string, label string) ([]string, error) {
stdout, err := WarnIfSlow(ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}")
func listContainersByLabel(prefix string, ociBin string, label string) ([]string, error) {
stdout, err := WarnIfSlow(prefix, ociBin, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}")
if err != nil {
return nil, err
}
@ -489,8 +489,8 @@ func PointToHostDockerDaemon() error {
}
// ContainerStatus returns the status of a container (running, exited, ...)
func ContainerStatus(ociBin string, name string) (state.State, error) {
out, err := WarnIfSlow(ociBin, "inspect", name, "--format={{.State.Status}}")
func ContainerStatus(prefix string, ociBin string, name string) (state.State, error) {
out, err := WarnIfSlow(prefix, ociBin, "inspect", name, "--format={{.State.Status}}")
o := strings.TrimSpace(string(out))
switch o {
case "running":
@ -511,8 +511,8 @@ func ContainerStatus(ociBin string, name string) (state.State, error) {
// ShutDown will run a command to shut down the container
// to ensure the container's process and networking bindings are all closed
// to avoid containers getting stuck before delete https://github.com/kubernetes/minikube/issues/7657
func ShutDown(ociBin string, name string) error {
cmd := exec.Command(ociBin, "exec", "--privileged", "-t", name, "/bin/bash", "-c", "sudo init 0")
func ShutDown(prefix string, ociBin string, name string) error {
cmd := exec.Command(prefix, ociBin, "exec", "--privileged", "-t", name, "/bin/bash", "-c", "sudo init 0")
if out, err := cmd.CombinedOutput(); err != nil {
glog.Infof("error shutdown %s output %q : %v", name, out, err)
}
@ -520,7 +520,7 @@ func ShutDown(ociBin string, name string) error {
time.Sleep(time.Second * 1)
// wait till it is stopped
stopped := func() error {
st, err := ContainerStatus(ociBin, name)
st, err := ContainerStatus(prefix, ociBin, name)
if st == state.Stopped {
glog.Infof("container %s status is %s", name, st)
return nil
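
`ShutDown` fires `sudo init 0` inside the container and then polls `ContainerStatus` until the container reports stopped. A standalone sketch of that polling loop, assuming a stopped container reports `exited` from inspect (the status `ContainerStatus` maps to `state.Stopped`):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// containerStatus shells out the same way as ContainerStatus above:
// <prefix> <ociBin> inspect <name> --format={{.State.Status}}
func containerStatus(prefix, ociBin, name string) (string, error) {
	out, err := exec.Command(prefix, ociBin, "inspect", name, "--format={{.State.Status}}").CombinedOutput()
	return strings.TrimSpace(string(out)), err
}

func main() {
	prefix, ociBin, name := "sudo", "podman", "minikube"
	// Poll until inspect reports "exited"; this mirrors the stopped()
	// closure that ShutDown retries above.
	for i := 0; i < 5; i++ {
		if st, err := containerStatus(prefix, ociBin, name); err == nil && st == "exited" {
			fmt.Println("container", name, "is stopped")
			return
		}
		time.Sleep(time.Second)
	}
	fmt.Println("gave up waiting for", name, "to stop")
}
```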

View File

@ -47,6 +47,7 @@ type CreateParams struct {
Memory string // memory (mbs) to assign to the container
Envs map[string]string // environment variables to pass to the container
ExtraArgs []string // a list of any extra option to pass to oci binary during creation time, for example --expose 8080...
OCIPrefix string // env or sudo
OCIBinary string // docker or podman
}

View File

@ -29,18 +29,18 @@ import (
// DeleteAllVolumesByLabel deletes all volumes that have a specific label
// if there is no volume to delete it will return nil
func DeleteAllVolumesByLabel(ociBin string, label string) []error {
func DeleteAllVolumesByLabel(prefix string, ociBin string, label string) []error {
var deleteErrs []error
glog.Infof("trying to delete all %s volumes with label %s", ociBin, label)
vs, err := allVolumesByLabel(ociBin, label)
vs, err := allVolumesByLabel(prefix, ociBin, label)
if err != nil {
return []error{fmt.Errorf("listing volumes by label %q: %v", label, err)}
}
for _, v := range vs {
if _, err := WarnIfSlow(ociBin, "volume", "rm", "--force", v); err != nil {
if _, err := WarnIfSlow(prefix, ociBin, "volume", "rm", "--force", v); err != nil {
deleteErrs = append(deleteErrs, fmt.Errorf("deleting %q", v))
}
}
@ -51,11 +51,11 @@ func DeleteAllVolumesByLabel(ociBin string, label string) []error {
// PruneAllVolumesByLabel deletes all volumes that have a specific label
// if there is no volume to delete it will return nil
// example: docker volume prune -f --filter label=name.minikube.sigs.k8s.io=minikube
func PruneAllVolumesByLabel(ociBin string, label string) []error {
func PruneAllVolumesByLabel(prefix string, ociBin string, label string) []error {
var deleteErrs []error
glog.Infof("trying to prune all %s volumes with label %s", ociBin, label)
if _, err := WarnIfSlow(ociBin, "volume", "prune", "-f", "--filter", "label="+label); err != nil {
if _, err := WarnIfSlow(prefix, ociBin, "volume", "prune", "-f", "--filter", "label="+label); err != nil {
deleteErrs = append(deleteErrs, errors.Wrapf(err, "prune volume by label %s", label))
}
@ -64,8 +64,8 @@ func PruneAllVolumesByLabel(ociBin string, label string) []error {
// allVolumesByLabel returns the names of all docker volumes with a specific label
// will not return an error if no volume is found.
func allVolumesByLabel(ociBin string, label string) ([]string, error) {
cmd := exec.Command(ociBin, "volume", "ls", "--filter", "label="+label, "--format", "{{.Name}}")
func allVolumesByLabel(prefix string, ociBin string, label string) ([]string, error) {
cmd := exec.Command(prefix, ociBin, "volume", "ls", "--filter", "label="+label, "--format", "{{.Name}}")
stdout, err := cmd.Output()
s := bufio.NewScanner(bytes.NewReader(stdout))
var vols []string
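
For the docker case, the prune call above expands to exactly the command line given in the comment, now with its `env` prefix. A minimal sketch:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// The docker-side prune from the comment above, with its env prefix:
	// env docker volume prune -f --filter label=name.minikube.sigs.k8s.io=minikube
	label := "name.minikube.sigs.k8s.io=minikube"
	cmd := exec.Command("env", "docker", "volume", "prune", "-f", "--filter", "label="+label)
	out, err := cmd.CombinedOutput()
	fmt.Printf("%v -> err=%v\n%s", cmd.Args, err, out)
}
```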

View File

@ -49,6 +49,7 @@ type Config struct {
CPU int // Number of CPU cores assigned to the container
Memory int // max memory in MB
StorePath string // libmachine store path
OCIPrefix string // prefix to use (env, sudo, ...)
OCIBinary string // oci tool to use (docker, podman,...)
ImageDigest string // image name with sha to use for the node
Mounts []oci.Mount // mounts

View File

@ -38,19 +38,22 @@ import (
// It implements the CommandRunner interface.
type kicRunner struct {
nameOrID string
prefix string
ociBin string
}
// NewKICRunner returns a kicRunner, an implementation of Runner that runs commands inside a container
func NewKICRunner(containerNameOrID string, oci string) Runner {
func NewKICRunner(containerNameOrID string, prefix string, oci string) Runner {
return &kicRunner{
nameOrID: containerNameOrID,
ociBin: oci, // docker or podman
prefix: prefix, // env or sudo
ociBin: oci, // docker or podman
}
}
func (k *kicRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) {
args := []string{
k.ociBin,
"exec",
// run with privileges so we can remount, etc.
"--privileged",
@ -81,7 +84,7 @@ func (k *kicRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) {
args,
cmd.Args...,
)
oc := exec.Command(k.ociBin, args...)
oc := exec.Command(k.prefix, args...)
oc.Stdin = cmd.Stdin
oc.Stdout = cmd.Stdout
oc.Stderr = cmd.Stderr
@ -199,7 +202,7 @@ func (k *kicRunner) chmod(dst string, perm string) error {
// Podman's cp command doesn't match docker's and doesn't have -a
func copyToPodman(src string, dest string) error {
if out, err := exec.Command(oci.Podman, "cp", src, dest).CombinedOutput(); err != nil {
if out, err := exec.Command("sudo", oci.Podman, "cp", src, dest).CombinedOutput(); err != nil {
return errors.Wrapf(err, "podman copy %s into %s, output: %s", src, dest, string(out))
}
return nil
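
Inside `RunCmd`, the OCI binary moved from `exec.Command`'s first argument into the args slice, leaving argv[0] free for the prefix. A trimmed sketch of the resulting command line for podman (the real runner adds more flags and the user's full command):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// How the kicRunner assembles argv after this change: the prefix is
	// argv[0] and the OCI binary is now the first element of args.
	prefix, ociBin, container := "sudo", "podman", "minikube"
	args := []string{ociBin, "exec", "--privileged", container, "cat", "/etc/os-release"}
	oc := exec.Command(prefix, args...)
	out, err := oc.CombinedOutput()
	fmt.Printf("%v -> err=%v\n%s", oc.Args, err, out)
}
```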

View File

@ -203,7 +203,7 @@ func ListProfiles(miniHome ...string) (validPs []*Profile, inValidPs []*Profile,
return nil, nil, err
}
// try to get the profiles list based on all containers created by the docker driver
cs, err := oci.ListOwnedContainers(oci.Docker)
cs, err := oci.ListOwnedContainers("env", oci.Docker)
if err == nil {
pDirs = append(pDirs, cs...)
}

View File

@ -126,7 +126,7 @@ func BareMetal(name string) bool {
// NeedsRoot returns true if driver needs to run with root privileges
func NeedsRoot(name string) bool {
return name == None || name == Podman
return name == None
}
// NeedsPortForward returns true if the driver is unable to provide direct IP connectivity

View File

@ -35,12 +35,12 @@ import (
// deleteOrphanedKIC attempts to delete an orphaned docker instance for machines without a config file
// used as a last-effort cleanup; it does not return errors and won't warn the user.
func deleteOrphanedKIC(ociBin string, name string) {
func deleteOrphanedKIC(prefix string, ociBin string, name string) {
if !(ociBin == oci.Podman || ociBin == oci.Docker) {
return
}
_, err := oci.ContainerStatus(ociBin, name)
_, err := oci.ContainerStatus(prefix, ociBin, name)
if err != nil {
glog.Infof("couldn't inspect container %q before deleting, %s-daemon might needs a restart!: %v", name, ociBin, err)
return
@ -49,10 +49,10 @@ func deleteOrphanedKIC(ociBin string, name string) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
if err := oci.ShutDown(ociBin, name); err != nil {
if err := oci.ShutDown(prefix, ociBin, name); err != nil {
glog.Infof("couldn't shut down %s (might be okay): %v ", name, err)
}
cmd := exec.CommandContext(ctx, ociBin, "rm", "-f", "-v", name)
cmd := exec.CommandContext(ctx, prefix, ociBin, "rm", "-f", "-v", name)
err = cmd.Run()
if err == nil {
glog.Infof("Found stale kic container and successfully cleaned it up!")
@ -63,8 +63,8 @@ func deleteOrphanedKIC(ociBin string, name string) {
func DeleteHost(api libmachine.API, machineName string) error {
host, err := api.Load(machineName)
if err != nil && host == nil {
deleteOrphanedKIC(oci.Docker, machineName)
deleteOrphanedKIC(oci.Podman, machineName)
deleteOrphanedKIC("env", oci.Docker, machineName)
deleteOrphanedKIC("sudo", oci.Podman, machineName)
// Keep going even if minikube does not know about the host
}

View File

@ -80,7 +80,7 @@ func trySSHPowerOff(h *host.Host) error {
out.T(out.Shutdown, `Powering off "{{.profile_name}}" via SSH ...`, out.V{"profile_name": h.Name})
// different for kic because RunSSHCommand is not implemented by kic
if driver.IsKIC(h.DriverName) {
err := oci.ShutDown(h.DriverName, h.Name)
err := oci.ShutDown("sudo", h.DriverName, h.Name)
glog.Infof("shutdown container: err=%v", err)
} else {
out, err := h.RunSSHCommand("sudo poweroff")

View File

@ -47,7 +47,7 @@ func init() {
if err := registry.Register(registry.DriverDef{
Name: driver.Docker,
Config: configure,
Init: func() drivers.Driver { return kic.NewDriver(kic.Config{OCIBinary: oci.Docker}) },
Init: func() drivers.Driver { return kic.NewDriver(kic.Config{OCIPrefix: "env", OCIBinary: oci.Docker}) },
Status: status,
Priority: priority,
}); err != nil {
@ -62,6 +62,7 @@ func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
ImageDigest: kic.BaseImage,
CPU: cc.CPUs,
Memory: cc.Memory,
OCIPrefix: "env",
OCIBinary: oci.Docker,
APIServerPort: cc.Nodes[0].Port,
KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,

View File

@ -41,7 +41,7 @@ func init() {
if err := registry.Register(registry.DriverDef{
Name: driver.Podman,
Config: configure,
Init: func() drivers.Driver { return kic.NewDriver(kic.Config{OCIBinary: oci.Podman}) },
Init: func() drivers.Driver { return kic.NewDriver(kic.Config{OCIPrefix: "sudo", OCIBinary: oci.Podman}) },
Status: status,
Priority: registry.Experimental,
}); err != nil {
@ -56,6 +56,7 @@ func configure(cc config.ClusterConfig, n config.Node) (interface{}, error) {
ImageDigest: strings.Split(kic.BaseImage, "@")[0], // podman does not support docker image references with both a tag and a digest.
CPU: cc.CPUs,
Memory: cc.Memory,
OCIPrefix: "sudo",
OCIBinary: oci.Podman,
APIServerPort: cc.Nodes[0].Port,
}), nil
@ -71,7 +72,7 @@ func status() registry.State {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, oci.Podman, "version", "-f", "{{.Version}}")
cmd := exec.CommandContext(ctx, "sudo", oci.Podman, "version", "-f", "{{.Version}}")
o, err := cmd.CombinedOutput()
output := string(o)
if err != nil {
@ -89,7 +90,7 @@ func status() registry.State {
// Allow no more than 3 seconds for querying state
ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
err = exec.CommandContext(ctx, oci.Podman, "info").Run()
err = exec.CommandContext(ctx, "sudo", oci.Podman, "info").Run()
if err != nil {
return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Podman is not running or taking too long to respond. Try: restarting podman."}
}
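
The health probe above now runs podman under sudo with hard timeouts. A sketch of the version check in isolation; if sudo prompts for a password here, the probe errors out or trips the 2-second timeout, which is why passwordless sudo for podman is effectively a prerequisite for this driver:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	// The same 2-second health probe as status() above; a sudo password
	// prompt makes the command fail or exceed the deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	cmd := exec.CommandContext(ctx, "sudo", "podman", "version", "-f", "{{.Version}}")
	o, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Printf("podman not healthy: %v (%s)\n", err, o)
		return
	}
	fmt.Print("podman version: ", string(o))
}
```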