add option to exec runner to point to host docker daemon

pull/6492/head
Medya Gh 2020-02-04 15:33:04 -08:00
parent f6c7048392
commit c86b20ade2
9 changed files with 77 additions and 19 deletions

View File

@@ -163,7 +163,7 @@ REM @FOR /f "tokens=*" %%i IN ('%s') DO @%%i
func shellCfgSet(api libmachine.API) (*ShellConfig, error) {
-envMap, err := cluster.GetHostDockerEnv(api)
+envMap, err := cluster.GetNodeDockerEnv(api)
if err != nil {
return nil, err
}

View File

@@ -64,7 +64,7 @@ type Config struct {
// NewDriver returns a fully configured None driver
func NewDriver(c Config) *Driver {
-runner := &command.ExecRunner{}
+runner := command.NewExecRunner()
runtime, err := cruntime.New(cruntime.Config{Type: c.ContainerRuntime, Runner: runner})
// Libraries shouldn't panic, but there is no way for drivers to return error :(
if err != nil {

View File

@@ -338,7 +338,7 @@ func TestGetHostStatus(t *testing.T) {
checkState(state.Stopped.String())
}
-func TestGetHostDockerEnv(t *testing.T) {
+func TestGetNodeDockerEnv(t *testing.T) {
RegisterMockDriver(t)
tempDir := tests.MakeTempDir()
defer os.RemoveAll(tempDir)
@@ -356,7 +356,7 @@ func TestGetHostDockerEnv(t *testing.T) {
}
h.Driver = d
-envMap, err := GetHostDockerEnv(api)
+envMap, err := GetNodeDockerEnv(api)
if err != nil {
t.Fatalf("Unexpected error getting env: %v", err)
}
@@ -374,7 +374,7 @@ func TestGetHostDockerEnv(t *testing.T) {
}
}
-func TestGetHostDockerEnvIPv6(t *testing.T) {
+func TestGetNodeDockerEnvIPv6(t *testing.T) {
RegisterMockDriver(t)
tempDir := tests.MakeTempDir()
@@ -393,7 +393,7 @@ func TestGetHostDockerEnvIPv6(t *testing.T) {
}
h.Driver = d
-envMap, err := GetHostDockerEnv(api)
+envMap, err := GetNodeDockerEnv(api)
if err != nil {
t.Fatalf("Unexpected error getting env: %v", err)
}

View File

@@ -31,8 +31,8 @@ import (
"k8s.io/minikube/pkg/minikube/localpath"
)
-// GetHostDockerEnv gets the necessary docker env variables to allow the use of docker through minikube's vm
-func GetHostDockerEnv(api libmachine.API) (map[string]string, error) {
+// GetNodeDockerEnv gets the necessary docker env variables to allow the use of docker through minikube's vm
+func GetNodeDockerEnv(api libmachine.API) (map[string]string, error) {
pName := viper.GetString(config.MachineProfile)
host, err := CheckIfHostExistsAndLoad(api, pName)
if err != nil {
@@ -58,9 +58,10 @@ func GetHostDockerEnv(api libmachine.API) (map[string]string, error) {
}
envMap := map[string]string{
-constants.DockerTLSVerifyEnv: "1",
-constants.DockerHostEnv:      tcpPrefix + net.JoinHostPort(ip, fmt.Sprint(port)),
-constants.DockerCertPathEnv:  localpath.MakeMiniPath("certs"),
+constants.DockerTLSVerifyEnv:       "1",
+constants.DockerHostEnv:            tcpPrefix + net.JoinHostPort(ip, fmt.Sprint(port)),
+constants.DockerCertPathEnv:        localpath.MakeMiniPath("certs"),
+constants.MinikubeActiveDockerdEnv: pName,
}
return envMap, nil
}
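
As context for the change above: GetNodeDockerEnv only builds a plain map of variable names to values; a caller such as the docker-env command can turn that map into export statements for the user's shell. The sketch below is illustrative only, with made-up values in the shape the function returns, and is not code from this PR.

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Example values in the shape GetNodeDockerEnv returns; the real map is
	// built from the node's IP, port, and minikube's certificate directory.
	envMap := map[string]string{
		"DOCKER_TLS_VERIFY":       "1",
		"DOCKER_HOST":             "tcp://192.168.99.100:2376",
		"DOCKER_CERT_PATH":        "/home/user/.minikube/certs",
		"MINIKUBE_ACTIVE_DOCKERD": "minikube",
	}

	// Emit deterministic "export" lines a POSIX shell could eval.
	keys := make([]string, 0, len(envMap))
	for k := range envMap {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Printf("export %s=%q\n", k, envMap[k])
	}
}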

View File

@@ -174,7 +174,7 @@ func commandRunner(h *host.Host) (command.Runner, error) {
return &command.FakeCommandRunner{}, nil
}
if driver.BareMetal(h.Driver.DriverName()) {
-return &command.ExecRunner{}, nil
+return command.NewExecRunner(), nil
}
if h.Driver.DriverName() == driver.Docker {
return command.NewKICRunner(h.Name, "docker"), nil

View File

@@ -30,17 +30,36 @@ import (
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
+"k8s.io/minikube/pkg/minikube/constants"
)
// ExecRunner runs commands using the os/exec package.
//
// It implements the CommandRunner interface.
-type ExecRunner struct{}
+type execRunner struct {
+EnsureUsingHostDaemon bool // if set, ensures the command runs against the host docker (podman) daemon, not the one inside minikube
+}
+
+// NewExecRunner returns an execRunner, a Runner implementation that runs commands directly on the host.
+// Pass true to reset any docker-env variables first, so commands hit the host docker (podman) daemon rather than the one inside minikube.
+func NewExecRunner(ensureHostDaemon ...bool) Runner {
+if len(ensureHostDaemon) > 0 {
+return &execRunner{EnsureUsingHostDaemon: ensureHostDaemon[0]}
+}
+return &execRunner{}
+}
// RunCmd implements the Command Runner interface to run an exec.Cmd object
-func (*ExecRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) {
+func (e *execRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) {
rr := &RunResult{Args: cmd.Args}
glog.Infof("Run: %v", rr.Command())
+if e.EnsureUsingHostDaemon {
+err := pointToHostDockerDaemon()
+if err != nil {
+return rr, errors.Wrap(err, "pointing to correct docker-daemon")
+}
+}
var outb, errb io.Writer
if cmd.Stdout == nil {
@@ -79,7 +98,13 @@ func (*ExecRunner) RunCmd(cmd *exec.Cmd) (*RunResult, error) {
}
// Copy copies a file and its permissions
-func (*ExecRunner) Copy(f assets.CopyableFile) error {
+func (e *execRunner) Copy(f assets.CopyableFile) error {
+if e.EnsureUsingHostDaemon {
+err := pointToHostDockerDaemon()
+if err != nil {
+return errors.Wrap(err, "pointing to correct docker-daemon")
+}
+}
targetPath := path.Join(f.GetTargetDir(), f.GetTargetName())
if _, err := os.Stat(targetPath); err == nil {
if err := os.Remove(targetPath); err != nil {
@@ -108,7 +133,36 @@ do you have the correct permissions?`,
}
// Remove removes a file
-func (e *ExecRunner) Remove(f assets.CopyableFile) error {
+func (e *execRunner) Remove(f assets.CopyableFile) error {
+if e.EnsureUsingHostDaemon {
+err := pointToHostDockerDaemon()
+if err != nil {
+return errors.Wrap(err, "pointing to correct docker-daemon")
+}
+}
targetPath := filepath.Join(f.GetTargetDir(), f.GetTargetName())
return os.Remove(targetPath)
}
+// pointToHostDockerDaemon unsets the env variables that point to the docker daemon inside minikube,
+// so that subsequent commands use the docker (podman) daemon installed by the user on the host.
+func pointToHostDockerDaemon() error {
+start := time.Now()
+p := os.Getenv(constants.MinikubeActiveDockerdEnv)
+if p != "" {
+glog.Infof("shell is pointing to the docker daemon inside minikube (profile %q), resetting it to the host daemon", p)
+}
+for i := range constants.DockerDaemonEnvs {
+e := constants.DockerDaemonEnvs[i]
+glog.Infof("resetting env %s", e)
+err := os.Setenv(e, "")
+if err != nil {
+return errors.Wrapf(err, "resetting %s env", e)
+}
+}
+elapsed := time.Since(start)
+glog.Infof("pointToHostDockerDaemon took %s", elapsed)
+return nil
+}
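
A hypothetical caller of the new constructor could look like the following sketch. It assumes the k8s.io/minikube module is on the import path and that RunResult exposes the captured output as a Stdout buffer, as the command package did at the time; it is not code from this PR.

package main

import (
	"fmt"
	"os/exec"

	"k8s.io/minikube/pkg/minikube/command"
)

func main() {
	// true enables EnsureUsingHostDaemon: the DOCKER_* variables pointing at the
	// daemon inside minikube are reset before each command runs.
	runner := command.NewExecRunner(true)

	rr, err := runner.RunCmd(exec.Command("docker", "version", "--format", "{{.Server.Version}}"))
	if err != nil {
		fmt.Printf("docker version failed: %v\n", err)
		return
	}
	fmt.Printf("host daemon version: %s\n", rr.Stdout.String())
}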

View File

@@ -39,7 +39,6 @@ const (
ClusterDNSDomain = "cluster.local"
// DefaultServiceCIDR is the CIDR to be used for service cluster IPs
DefaultServiceCIDR = "10.96.0.0/12"
// DockerTLSVerifyEnv is used for docker daemon settings
DockerTLSVerifyEnv = "DOCKER_TLS_VERIFY"
// DockerHostEnv is used for docker daemon settings
@@ -48,9 +47,13 @@
DockerCertPathEnv = "DOCKER_CERT_PATH"
+// MinikubeActiveDockerdEnv holds the name of the docker daemon the user's shell is pointing at;
+// its value is the minikube profile name, or empty if the shell points at the user's host daemon.
+MinikubeActiveDockerdEnv = "MINIKUBE_ACTIVE_DOCKERD"
)
+
+// DockerDaemonEnvs is the list of environment variables that control which docker daemon the shell is using
+var DockerDaemonEnvs = [3]string{DockerHostEnv, DockerTLSVerifyEnv, DockerCertPathEnv}
// DefaultMinipath is the default Minikube path (under the home directory)
var DefaultMinipath = filepath.Join(homedir.HomeDir(), ".minikube")
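
To show how the new constants fit together: below is a small hypothetical check (not part of this PR) that reports whether the current shell has been pointed at a minikube-managed daemon via minikube docker-env. It assumes the k8s.io/minikube module is importable.

package main

import (
	"fmt"
	"os"

	"k8s.io/minikube/pkg/minikube/constants"
)

func main() {
	// MINIKUBE_ACTIVE_DOCKERD carries the profile name when minikube docker-env is active.
	if p := os.Getenv(constants.MinikubeActiveDockerdEnv); p != "" {
		fmt.Printf("shell docker-env points at minikube profile %q\n", p)
		for _, e := range constants.DockerDaemonEnvs {
			fmt.Printf("  %s=%s\n", e, os.Getenv(e))
		}
		return
	}
	fmt.Println("shell is using the host docker daemon")
}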

View File

@@ -152,7 +152,7 @@ func CommandRunner(h *host.Host) (command.Runner, error) {
return &command.FakeCommandRunner{}, nil
}
if driver.BareMetal(h.Driver.DriverName()) {
-return &command.ExecRunner{}, nil
+return command.NewExecRunner(), nil
}
if h.Driver.DriverName() == driver.Docker {
return command.NewKICRunner(h.Name, "docker"), nil

View File

@@ -117,7 +117,7 @@ func configureAuth(p miniProvisioner) error {
func copyHostCerts(authOptions auth.Options) error {
log.Infof("copyHostCerts")
-execRunner := &command.ExecRunner{}
+execRunner := command.NewExecRunner()
hostCerts := map[string]string{
authOptions.CaCertPath: path.Join(authOptions.StorePath, "ca.pem"),
authOptions.ClientCertPath: path.Join(authOptions.StorePath, "cert.pem"),