use oci to delete

parent b2bcd9a5cb
commit 54aa958f01
@@ -26,7 +26,6 @@ import (
    "time"

    "github.com/docker/machine/libmachine/drivers"
    "github.com/docker/machine/libmachine/log"
    "github.com/docker/machine/libmachine/ssh"
    "github.com/docker/machine/libmachine/state"
    "github.com/golang/glog"
@@ -269,16 +268,19 @@ func (d *Driver) Kill() error {
// Remove will delete the Kic Node Container
func (d *Driver) Remove() error {
    if _, err := oci.ContainerID(d.OCIBinary, d.MachineName); err != nil {
        log.Warnf("could not find the container %s to remove it.", d.MachineName)
        glog.Info("could not find the container %s to remove it. will try anyways", d.MachineName)
    }
    cmd := exec.Command(d.NodeConfig.OCIBinary, "rm", "-f", "-v", d.MachineName)
    o, err := cmd.CombinedOutput()
    out := strings.TrimSpace(string(o))
    if err != nil {
        if strings.Contains(out, "is already in progress") {
            log.Warnf("Docker engine is stuck. please restart docker daemon on your computer.", d.MachineName)

    if err := oci.DeleteContainer(d.NodeConfig.OCIBinary, d.MachineName); err != nil {
        if strings.Contains(err.Error(), "is already in progress") {
            glog.Warningf("Docker engine is stuck. please restart docker daemon on your computer.", d.MachineName)
            return err
        }
        return errors.Wrapf(err, "removing container %s, output %s", d.MachineName, out)
        if strings.Contains(err.Error(), "No such container:") {
            glog.Info("no container name %q found to delete", d.MachineName)
            return nil
        }

    }
    return nil
}

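Taken with the commit title, the hunk above swaps the inline `rm -f -v` invocation in Remove for a call to oci.DeleteContainer and classifies its failure modes. Below is a minimal, self-contained sketch of that tolerant-removal pattern; the "docker" binary and "minikube" container name are illustrative assumptions, not values from the commit.

package main

import (
    "fmt"
    "os/exec"
    "strings"
)

// removeContainer force-removes a container and mirrors the error handling in
// the hunk above: a stuck engine ("is already in progress") is surfaced to the
// caller, while "No such container:" is treated as already deleted.
func removeContainer(ociBin, name string) error {
    out, err := exec.Command(ociBin, "rm", "-f", "-v", name).CombinedOutput()
    if err != nil {
        msg := strings.TrimSpace(string(out))
        if strings.Contains(msg, "is already in progress") {
            return fmt.Errorf("%s engine looks stuck, please restart it: %w", ociBin, err)
        }
        if strings.Contains(msg, "No such container:") {
            return nil // already gone, nothing to do
        }
        return fmt.Errorf("removing container %s: %s: %w", name, msg, err)
    }
    return nil
}

func main() {
    // Hypothetical invocation; "docker" and "minikube" are example values only.
    if err := removeContainer("docker", "minikube"); err != nil {
        fmt.Println("remove failed:", err)
    }
}

Treating "No such container:" as success keeps Remove effectively idempotent, which matters when a delete is retried after a partial failure.
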
@@ -60,6 +60,9 @@ func DeleteContainersByLabel(ociBin string, label string) []error {
            glog.Errorf("%s daemon seems to be stuck. Please try restarting your %s. :%v", ociBin, ociBin, err)
            continue
        }
        if err := ShutDown(ociBin, c); err != nil {
            glog.Info("couldn't shut down %s (might be okay): %v ", c, err)
        }
        cmd := exec.Command(ociBin, "rm", "-f", "-v", c)
        if out, err := cmd.CombinedOutput(); err != nil {
            deleteErrs = append(deleteErrs, errors.Wrapf(err, "delete container %s: output %s", c, out))

@@ -77,6 +80,9 @@ func DeleteContainer(ociBin string, name string) error {
        glog.Errorf("%s daemon seems to be stuck. Please try restarting your %s. Will try to delete anyways: %v", ociBin, ociBin, err)
    }
    // try to delete anyways
    if err := ShutDown(ociBin, name); err != nil {
        glog.Info("couldn't shut down %s (might be okay): %v ", name, err)
    }
    cmd := exec.Command(ociBin, "rm", "-f", "-v", name)
    if out, err := cmd.CombinedOutput(); err != nil {
        return errors.Wrapf(err, "delete container %s: output %s", name, out)

@@ -486,3 +492,14 @@ func ContainerStatus(ociBin string, name string) (string, error) {
    out, err := WarnIfSlow(ociBin, "inspect", name, "--format={{.State.Status}}")
    return strings.TrimSpace(string(out)), err
}

// Shutdown will run command to shut down the container
// to ensure the containers process and networking bindings are all closed
// to avoid containers getting stuck before delete https://github.com/kubernetes/minikube/issues/7657
func ShutDown(ociBin string, name string) error {
    cmd := exec.Command(ociBin, "exec", "-it", name, "sudo init 0")
    if out, err := cmd.CombinedOutput(); err != nil {
        return errors.Wrapf(err, "shutdown %s: output %q", name, out)
    }
    return nil
}

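The new ShutDown helper issues `sudo init 0` inside the container before any removal, so the container's processes and network bindings are released first (see the linked issue #7657). A rough, self-contained sketch of a best-effort caller, mirroring the DeleteContainer and DeleteContainersByLabel hunks above; the binary and container name are assumptions, not values from the commit.

package main

import (
    "fmt"
    "os/exec"
)

// shutDown mirrors the `exec -it <name> sudo init 0` call added above: ask init
// inside the container to halt it before the container is force-removed.
func shutDown(ociBin, name string) error {
    out, err := exec.Command(ociBin, "exec", "-it", name, "sudo init 0").CombinedOutput()
    if err != nil {
        return fmt.Errorf("shutdown %s: output %q: %w", name, out, err)
    }
    return nil
}

func main() {
    // Hypothetical values; a failed shutdown is logged and ignored, as in the
    // callers shown earlier, and the force-remove proceeds regardless.
    if err := shutDown("docker", "minikube"); err != nil {
        fmt.Printf("couldn't shut down %s (might be okay): %v\n", "minikube", err)
    }
}
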
@@ -31,8 +31,8 @@ const (
    nodeRoleLabelKey = "role.minikube.sigs.k8s.io"
    // CreatedByLabelKey is applied to any container/volume that is created by minikube created_by.minikube.sigs.k8s.io=true
    CreatedByLabelKey = "created_by.minikube.sigs.k8s.io"
    // ShutDownCmd is the command halt and stop the container
    ShutDownCmd = "sudo init 0"
    // ShutownCmd is the command halt and stop the container
    ShutownCmd = "sudo init 0"
)

// CreateParams are parameters needed to create a container

@@ -49,6 +49,9 @@ func deleteOrphanedKIC(ociBin string, name string) {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    if err := oci.ShutDown(ociBin, name); err != nil {
        glog.Info("couldn't shut down %s (might be okay): %v ", name, err)
    }
    cmd := exec.CommandContext(ctx, ociBin, "rm", "-f", "-v", name)
    err = cmd.Run()
    if err == nil {

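deleteOrphanedKIC wraps the same shutdown-then-remove sequence in a 5-second context so a wedged daemon cannot stall cleanup indefinitely. A self-contained sketch of that timeout-bounded pattern follows; the binary and container name are hypothetical, and unlike the hunk above the shutdown here also runs under the context.

package main

import (
    "context"
    "fmt"
    "os/exec"
    "time"
)

func deleteOrphaned(ociBin, name string) {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // Best-effort shutdown first; failure here is informational only.
    if out, err := exec.CommandContext(ctx, ociBin, "exec", "-it", name, "sudo init 0").CombinedOutput(); err != nil {
        fmt.Printf("couldn't shut down %s (might be okay): %v (output %q)\n", name, err, out)
    }

    // The context kills the rm if it runs past the deadline.
    if err := exec.CommandContext(ctx, ociBin, "rm", "-f", "-v", name).Run(); err != nil {
        fmt.Printf("couldn't delete orphaned container %s: %v\n", name, err)
        return
    }
    fmt.Printf("deleted orphaned container %s\n", name)
}

func main() {
    deleteOrphaned("docker", "minikube") // hypothetical values
}
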
@@ -46,7 +46,7 @@ func StopHost(api libmachine.API, machineName string) error {
// stop forcibly stops a host without needing to load
func stop(h *host.Host) error {
    start := time.Now()
    if h.DriverName == driver.HyperV {
    if driver.NeedsShutdown(h.DriverName) {
        glog.Infof("As there are issues with stopping Hyper-V VMs using API, trying to shut down using SSH")
        if err := trySSHPowerOff(h); err != nil {
            return errors.Wrap(err, "ssh power off")

@@ -80,8 +80,8 @@ func trySSHPowerOff(h *host.Host) error {

    out.T(out.Shutdown, `Powering off "{{.profile_name}}" via SSH ...`, out.V{"profile_name": h.Name})
    if driver.IsKIC(h.DriverName) {
        out, err := h.RunSSHCommand(oci.ShutDownCmd)
        glog.Infof("shutdown cmd %q result: out=%s, err=%v", oci.ShutDownCmd, out, err)
        out, err := h.RunSSHCommand(oci.ShutownCmd)
        glog.Infof("shutdown cmd %q result: out=%s, err=%v", oci.ShutownCmd, out, err)
    } else {
        out, err := h.RunSSHCommand("sudo poweroff")
        // poweroff always results in an error, since the host disconnects.

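trySSHPowerOff chooses the shutdown command by driver: KIC containers get the shutdown command constant from the oci package over SSH, while VMs get `sudo poweroff`, which always errors because the host disconnects. A small sketch of that branch with an injected SSH runner; runSSH and the stub in main are hypothetical stand-ins for host.RunSSHCommand.

package main

import "fmt"

const shutDownCmd = "sudo init 0" // mirrors the constant defined in the oci package

func powerOff(isKIC bool, runSSH func(cmd string) (string, error)) {
    if isKIC {
        out, err := runSSH(shutDownCmd)
        fmt.Printf("shutdown cmd %q result: out=%s, err=%v\n", shutDownCmd, out, err)
        return
    }
    // poweroff always results in an error, since the host disconnects.
    out, err := runSSH("sudo poweroff")
    fmt.Printf("poweroff result: out=%s, err=%v\n", out, err)
}

func main() {
    fake := func(cmd string) (string, error) { return "ran: " + cmd, nil } // stub SSH runner
    powerOff(true, fake)
    powerOff(false, fake)
}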