Merge pull request #7244 from tstromberg/none-detect
Avoid provision.Detector when possible (pull/7261/head^2)
commit
1fac168ca3
|
@ -60,21 +60,24 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
|
|||
return h, errors.Wrap(err, "Error loading existing host. Please try running [minikube delete], then run [minikube start] again.")
|
||||
}
|
||||
|
||||
driverName := h.Driver.DriverName()
|
||||
|
||||
// check if need to re-run docker-env
|
||||
maybeWarnAboutEvalEnv(cc.Driver, cc.Name)
|
||||
maybeWarnAboutEvalEnv(driverName, cc.Name)
|
||||
|
||||
h, err = recreateIfNeeded(api, cc, n, h)
|
||||
if err != nil {
|
||||
return h, err
|
||||
}
|
||||
|
||||
// Technically, we should only have to call provision if Docker has changed,
|
||||
// but who can predict what shape the existing VM is in.
|
||||
e := engineOptions(cc)
|
||||
h.HostOptions.EngineOptions.Env = e.Env
|
||||
err = provisionDockerMachine(h)
|
||||
if err != nil {
|
||||
return h, errors.Wrap(err, "provision")
|
||||
// Avoid reprovisioning "none" driver because provision.Detect requires SSH
|
||||
if !driver.BareMetal(h.Driver.DriverName()) {
|
||||
e := engineOptions(cc)
|
||||
h.HostOptions.EngineOptions.Env = e.Env
|
||||
err = provisionDockerMachine(h)
|
||||
if err != nil {
|
||||
return h, errors.Wrap(err, "provision")
|
||||
}
|
||||
}
|
||||
|
||||
if driver.IsMock(h.DriverName) {
|
||||
|
@ -86,11 +89,11 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
|
|||
}
|
||||
|
||||
if driver.BareMetal(h.Driver.DriverName()) {
|
||||
glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName())
|
||||
glog.Infof("%s is local, skipping auth/time setup (requires ssh)", driverName)
|
||||
return h, nil
|
||||
}
|
||||
|
||||
return h, ensureSyncedGuestClock(h, cc.Driver)
|
||||
return h, ensureSyncedGuestClock(h, driverName)
|
||||
}
|
||||
|
||||
func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) {
|
||||
|
|
|
@ -18,13 +18,14 @@ package machine
|
|||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os/exec"
|
||||
|
||||
"github.com/docker/machine/libmachine/drivers"
|
||||
"github.com/docker/machine/libmachine/provision"
|
||||
"github.com/golang/glog"
|
||||
"github.com/shirou/gopsutil/cpu"
|
||||
"github.com/shirou/gopsutil/disk"
|
||||
"github.com/shirou/gopsutil/mem"
|
||||
"k8s.io/minikube/pkg/minikube/command"
|
||||
"k8s.io/minikube/pkg/minikube/out"
|
||||
)
|
||||
|
||||
|
@ -80,18 +81,17 @@ func showLocalOsRelease() {
|
|||
}
|
||||
|
||||
// logRemoteOsRelease shows systemd information about the current linux distribution, on the remote VM
|
||||
func logRemoteOsRelease(drv drivers.Driver) {
|
||||
provisioner, err := provision.DetectProvisioner(drv)
|
||||
func logRemoteOsRelease(r command.Runner) {
|
||||
rr, err := r.RunCmd(exec.Command("cat", "/etc/os-release"))
|
||||
if err != nil {
|
||||
glog.Errorf("DetectProvisioner: %v", err)
|
||||
glog.Infof("remote release failed: %v", err)
|
||||
}
|
||||
|
||||
osReleaseInfo, err := provision.NewOsRelease(rr.Stdout.Bytes())
|
||||
if err != nil {
|
||||
glog.Errorf("NewOsRelease: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
osReleaseInfo, err := provisioner.GetOsReleaseInfo()
|
||||
if err != nil {
|
||||
glog.Errorf("GetOsReleaseInfo: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
glog.Infof("Provisioned with %s", osReleaseInfo.PrettyName)
|
||||
glog.Infof("Remote host: %s", osReleaseInfo.PrettyName)
|
||||
}
|
||||
|
|
|
@ -212,7 +212,7 @@ func postStartSetup(h *host.Host, mc config.ClusterConfig) error {
|
|||
showLocalOsRelease()
|
||||
}
|
||||
if driver.IsVM(mc.Driver) {
|
||||
logRemoteOsRelease(h.Driver)
|
||||
logRemoteOsRelease(r)
|
||||
}
|
||||
return syncLocalAssets(r)
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue