Merge branch 'master' into corresponding

pull/3382/head
Thomas Stromberg 2018-11-29 15:50:29 -08:00
commit 3f1b8a0c02
48 changed files with 504 additions and 145 deletions

View File

@ -25,9 +25,19 @@ Minikube is a tool that makes it easy to run Kubernetes locally. Minikube runs a
## Installation
### macOS
[Homebrew](https://brew.sh/) is a package manager for macOS that can be used to install Minikube.
After installing Homebrew, run the following at a terminal prompt:
```shell
brew cask install minikube
```
This also installs the kubernetes-cli package. The installation can be verified using:
```shell
kubectl version
```
If it's not installed, install it using:
```shell
brew install kubernetes-cli
```
### Linux
@ -139,7 +149,7 @@ the following drivers:
* [hyperkit](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#hyperkit-driver)
* [xhyve](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#xhyve-driver)
* [hyperv](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#hyperV-driver)
* none (**Linux + docker daemon as container runtime only**) - this driver can be used to run the Kubernetes cluster components on the host instead of in a VM. This can be useful for CI workloads which do not support nested virtualization.
* none (**Linux-only**) - this driver can be used to run the Kubernetes cluster components on the host instead of in a VM. This can be useful for CI workloads which do not support nested virtualization.
```shell
$ minikube start

View File

@ -90,7 +90,7 @@ var settings = []Setting{
{
name: "iso-url",
set: SetString,
validations: []setFn{IsValidURL},
validations: []setFn{IsValidURL, IsURLExists},
},
{
name: config.WantUpdateNotification,

View File

@ -18,15 +18,15 @@ package config
import (
"fmt"
"github.com/docker/go-units"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/constants"
"net"
"net/url"
"os"
"strconv"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/constants"
"strings"
)
func IsValidDriver(string, driver string) error {
@ -59,6 +59,39 @@ func IsValidURL(name string, location string) error {
return nil
}
// IsURLExists validates that a "file://" URL points at an existing regular
// file on the local filesystem. URLs with any other scheme (http, https, ...)
// are accepted without further checks, since only local paths can be verified
// from here.
func IsURLExists(name string, location string) error {
	parsed, err := url.Parse(location)
	if err != nil {
		return fmt.Errorf("%s is not a valid URL", location)
	}
	// Remote locations cannot be checked for existence locally.
	if parsed.Scheme != "file" {
		return nil
	}
	// Strip the "file://" prefix to recover the underlying filesystem path.
	sysPath := strings.TrimPrefix(location, "file://")
	info, statErr := os.Stat(sysPath)
	switch {
	case statErr == nil:
		if info.IsDir() {
			return fmt.Errorf("%s is a directory", location)
		}
		return nil
	case os.IsNotExist(statErr):
		return fmt.Errorf("%s does not exist", location)
	case os.IsPermission(statErr):
		return fmt.Errorf("%s could not be opened (permission error: %s)", location, statErr.Error())
	default:
		return statErr
	}
}
func IsPositive(name string, val string) error {
i, err := strconv.Atoi(val)
if err != nil {

View File

@ -16,7 +16,10 @@ limitations under the License.
package config
import "testing"
import (
"os"
"testing"
)
type validationTest struct {
value string
@ -94,3 +97,25 @@ func TestValidCIDR(t *testing.T) {
runValidations(t, tests, "cidr", IsValidCIDR)
}
// TestIsURLExists checks IsURLExists against a file:// URL that is known to
// exist (the running test binary itself) and one that cannot exist (a path
// below a regular file).
func TestIsURLExists(t *testing.T) {
	self, err := os.Executable()
	if err != nil {
		// Without a known-existing path the remaining assertions are
		// meaningless, so fail fast rather than continuing with "".
		t.Fatalf("os.Executable: %v", err)
	}
	tests := []validationTest{
		{
			// The test binary must exist.
			value:     "file://" + self,
			shouldErr: false,
		},
		{
			// A subpath of a regular file can never exist.
			value:     "file://" + self + "/subpath-of-file",
			shouldErr: true,
		},
	}
	runValidations(t, tests, "url", IsURLExists)
}

View File

@ -19,6 +19,7 @@ package cmd
import (
"bufio"
"fmt"
"io"
"net/http"
"os"
"os/exec"
@ -91,7 +92,7 @@ var dashboardCmd = &cobra.Command{
}
}
glog.Infof("Waiting forever for kubectl proxy to exit ...")
glog.Infof("Success! I will now quietly sit around until kubectl proxy exits!")
if err = p.Wait(); err != nil {
glog.Errorf("Wait: %v", err)
}
@ -117,14 +118,51 @@ func kubectlProxy() (*exec.Cmd, string, error) {
if err := cmd.Start(); err != nil {
return nil, "", errors.Wrap(err, "proxy start")
}
glog.Infof("Waiting for kubectl to output host:port ...")
reader := bufio.NewReader(stdoutPipe)
glog.Infof("proxy started, reading stdout pipe ...")
out, err := reader.ReadString('\n')
if err != nil {
return nil, "", errors.Wrap(err, "reading stdout pipe")
var out []byte
for {
r, timedOut, err := readByteWithTimeout(reader, 5*time.Second)
if err != nil {
return cmd, "", fmt.Errorf("readByteWithTimeout: %v", err)
}
if r == byte('\n') {
break
}
if timedOut {
glog.Infof("timed out waiting for input: possibly due to an old kubectl version.")
break
}
out = append(out, r)
}
glog.Infof("proxy stdout: %s", string(out))
return cmd, hostPortRe.FindString(string(out)), nil
}
// readByteWithTimeout returns a byte from a reader or an indicator that a timeout has occurred.
func readByteWithTimeout(r io.ByteReader, timeout time.Duration) (byte, bool, error) {
bc := make(chan byte)
ec := make(chan error)
go func() {
b, err := r.ReadByte()
if err != nil {
ec <- err
} else {
bc <- b
}
close(bc)
close(ec)
}()
select {
case b := <-bc:
return b, false, nil
case err := <-ec:
return byte(' '), false, err
case <-time.After(timeout):
return byte(' '), true, nil
}
glog.Infof("proxy stdout: %s", out)
return cmd, hostPortRe.FindString(out), nil
}
// dashboardURL generates a URL for accessing the dashboard service

View File

@ -26,6 +26,7 @@ import (
"text/template"
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/host"
"github.com/docker/machine/libmachine/shell"
"github.com/golang/glog"
"github.com/pkg/errors"
@ -293,6 +294,14 @@ func (EnvNoProxyGetter) GetNoProxyVar() (string, string) {
return noProxyVar, noProxyValue
}
// GetDockerActive reports whether the docker systemd unit is active on the
// given host, by running `systemctl is-active docker` over SSH.
func GetDockerActive(host *host.Host) (bool, error) {
	out, err := host.RunSSHCommand(`sudo systemctl is-active docker`)
	// systemctl exits non-zero when the unit is inactive, so a non-nil err is
	// expected in that case; the error is still surfaced to the caller.
	active := err == nil && strings.TrimSpace(out) == "active"
	return active, err
}
// envCmd represents the docker-env command
var dockerEnvCmd = &cobra.Command{
Use: "docker-env",
@ -315,6 +324,11 @@ var dockerEnvCmd = &cobra.Command{
fmt.Println(`'none' driver does not support 'minikube docker-env' command`)
os.Exit(0)
}
docker, err := GetDockerActive(host)
if !docker {
fmt.Println(`# The docker service is currently not active`)
os.Exit(1)
}
var shellCfg *ShellConfig

View File

@ -57,6 +57,7 @@ const (
kubernetesVersion = "kubernetes-version"
hostOnlyCIDR = "host-only-cidr"
containerRuntime = "container-runtime"
criSocket = "cri-socket"
networkPlugin = "network-plugin"
hypervVirtualSwitch = "hyperv-virtual-switch"
kvmNetwork = "kvm-network"
@ -141,6 +142,7 @@ func runStart(cmd *cobra.Command, args []string) {
CPUs: viper.GetInt(cpus),
DiskSize: diskSizeMB,
VMDriver: viper.GetString(vmDriver),
ContainerRuntime: viper.GetString(containerRuntime),
HyperkitVpnKitSock: viper.GetString(vpnkitSock),
HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
XhyveDiskDriver: viper.GetString(xhyveDiskDriver),
@ -220,6 +222,7 @@ func runStart(cmd *cobra.Command, args []string) {
DNSDomain: viper.GetString(dnsDomain),
FeatureGates: viper.GetString(featureGates),
ContainerRuntime: viper.GetString(containerRuntime),
CRISocket: viper.GetString(criSocket),
NetworkPlugin: viper.GetString(networkPlugin),
ServiceCIDR: pkgutil.DefaultServiceCIDR,
ExtraOptions: extraOptions,
@ -290,14 +293,47 @@ func runStart(cmd *cobra.Command, args []string) {
cmdutil.MaybeReportErrorAndExit(err)
}
fmt.Println("Starting cluster components...")
fmt.Println("Stopping extra container runtimes...")
if !exists || config.VMDriver == "none" {
containerRuntime := viper.GetString(containerRuntime)
if config.VMDriver != constants.DriverNone && containerRuntime != "" {
if _, err := host.RunSSHCommand("sudo systemctl stop docker"); err == nil {
_, err = host.RunSSHCommand("sudo systemctl stop docker.socket")
}
if err != nil {
glog.Errorf("Error stopping docker: %v", err)
}
}
if config.VMDriver != constants.DriverNone && (containerRuntime != constants.CrioRuntime && containerRuntime != constants.Cri_oRuntime) {
if _, err := host.RunSSHCommand("sudo systemctl stop crio"); err != nil {
glog.Errorf("Error stopping crio: %v", err)
}
}
if config.VMDriver != constants.DriverNone && containerRuntime != constants.RktRuntime {
if _, err := host.RunSSHCommand("sudo systemctl stop rkt-api"); err == nil {
_, err = host.RunSSHCommand("sudo systemctl stop rkt-metadata")
}
if err != nil {
glog.Errorf("Error stopping rkt: %v", err)
}
}
if config.VMDriver != constants.DriverNone && containerRuntime == constants.ContainerdRuntime {
fmt.Println("Restarting containerd runtime...")
// restart containerd so that it can install all plugins
if _, err := host.RunSSHCommand("sudo systemctl restart containerd"); err != nil {
glog.Errorf("Error restarting containerd: %v", err)
}
}
if !exists || config.VMDriver == constants.DriverNone {
fmt.Println("Starting cluster components...")
if err := k8sBootstrapper.StartCluster(kubernetesConfig); err != nil {
glog.Errorln("Error starting cluster: ", err)
cmdutil.MaybeReportErrorAndExit(err)
}
} else {
fmt.Println("Machine exists, restarting cluster components...")
if err := k8sBootstrapper.RestartCluster(kubernetesConfig); err != nil {
glog.Errorln("Error restarting cluster: ", err)
cmdutil.MaybeReportErrorAndExit(err)
@ -398,6 +434,7 @@ func init() {
startCmd.Flags().StringSliceVar(&insecureRegistry, "insecure-registry", nil, "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.")
startCmd.Flags().StringSliceVar(&registryMirror, "registry-mirror", nil, "Registry mirrors to pass to the Docker daemon")
startCmd.Flags().String(containerRuntime, "", "The container runtime to be used")
startCmd.Flags().String(criSocket, "", "The cri socket path to be used")
startCmd.Flags().String(kubernetesVersion, constants.DefaultKubernetesVersion, "The kubernetes version that the minikube VM will use (ex: v1.2.3)")
startCmd.Flags().String(networkPlugin, "", "The name of the network plugin")
startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")

View File

@ -27,6 +27,7 @@ import (
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/service"
"k8s.io/minikube/pkg/minikube/tunnel"
"time"
)
var cleanup bool
@ -56,7 +57,12 @@ var tunnelCmd = &cobra.Command{
glog.Fatalf("error creating dockermachine client: %s", err)
}
glog.Infof("Creating k8s client...")
clientset, err := service.K8s.GetClientset()
//Tunnel uses the k8s clientset to query the API server for services in the LoadBalancerEmulator.
//We define the tunnel and minikube error free if the API server responds within a second.
//This also contributes to better UX, the tunnel status check can happen every second and
//doesn't hang on the API server call during startup and shutdown time or if there is a temporary error.
clientset, err := service.K8s.GetClientset(1 * time.Second)
if err != nil {
glog.Fatalf("error creating K8S clientset: %s", err)
}

View File

@ -87,7 +87,7 @@ spec:
serviceAccountName: nginx-ingress
terminationGracePeriodSeconds: 60
containers:
- image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.19.0
- image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0
name: nginx-ingress-controller
imagePullPolicy: IfNotPresent
readinessProbe:

View File

@ -1,2 +1,3 @@
sha256 d310d52706262009af886dbd3e8dcd09a339cdc3b57dc22a9121e6d6a87d2921 v1.8.4.tar.gz
sha256 9f79cee99e272c9cfc561ae31235d84d4da59fd5c8b3d3ab6623bf9a92d90c5a v1.10.0.tar.gz
sha256 09e53fd550f4f10108879131ee6b8ef1c367ce71a73dcf6350c4cc898751d8c1 v1.11.8.tar.gz

View File

@ -4,8 +4,8 @@
#
################################################################################
CRIO_BIN_VERSION = v1.10.0
CRIO_BIN_SITE = https://github.com/kubernetes-incubator/cri-o/archive
CRIO_BIN_VERSION = v1.11.8
CRIO_BIN_SITE = https://github.com/kubernetes-sigs/cri-o/archive
CRIO_BIN_SOURCE = $(CRIO_BIN_VERSION).tar.gz
CRIO_BIN_DEPENDENCIES = host-go libgpgme
CRIO_BIN_GOPATH = $(@D)/_output
@ -22,16 +22,14 @@ define CRIO_BIN_USERS
endef
define CRIO_BIN_CONFIGURE_CMDS
mkdir -p $(CRIO_BIN_GOPATH)/src/github.com/kubernetes-incubator
ln -sf $(@D) $(CRIO_BIN_GOPATH)/src/github.com/kubernetes-incubator/cri-o
mkdir -p $(CRIO_BIN_GOPATH)/src/github.com/kubernetes-sigs
ln -sf $(@D) $(CRIO_BIN_GOPATH)/src/github.com/kubernetes-sigs/cri-o
$(CRIO_BIN_ENV) $(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D) install.tools DESTDIR=$(TARGET_DIR) PREFIX=$(TARGET_DIR)/usr
endef
define CRIO_BIN_BUILD_CMDS
mkdir -p $(@D)/bin
$(CRIO_BIN_ENV) $(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D) PREFIX=/usr pause
$(CRIO_BIN_ENV) $(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D) PREFIX=/usr crio
$(CRIO_BIN_ENV) $(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D) PREFIX=/usr conmon
$(CRIO_BIN_ENV) $(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D) PREFIX=/usr binaries
endef
define CRIO_BIN_INSTALL_TARGET_CMDS

View File

@ -24,9 +24,10 @@ Or you can use the extended version:
```shell
$ minikube start \
--network-plugin=cni \
--cri-socket=/var/run/crio/crio.sock \
--extra-config=kubelet.container-runtime=remote \
--extra-config=kubelet.container-runtime-endpoint=/var/run/crio/crio.sock \
--extra-config=kubelet.image-service-endpoint=/var/run/crio/crio.sock
--extra-config=kubelet.container-runtime-endpoint=unix:///var/run/crio/crio.sock \
--extra-config=kubelet.image-service-endpoint=unix:///var/run/crio/crio.sock
```
### Using containerd
@ -44,6 +45,7 @@ Or you can use the extended version:
```shell
$ minikube start \
--network-plugin=cni \
--cri-socket=/run/containerd/containerd.sock \
--extra-config=kubelet.container-runtime=remote \
--extra-config=kubelet.container-runtime-endpoint=unix:///run/containerd/containerd.sock \
--extra-config=kubelet.image-service-endpoint=unix:///run/containerd/containerd.sock

View File

@ -1,4 +1,6 @@
# Adding new driver
# Adding new driver (Deprecated)
New drivers should be added into https://github.com/machine-drivers
Minikube relies on docker machine drivers to manage machines. This document talks about how to
add an existing docker machine driver into minikube registry, so that minikube can use the driver

View File

@ -25,17 +25,17 @@ To install the KVM2 driver, first install and configure the prereqs:
```shell
# Install libvirt and qemu-kvm on your system, e.g.
# Debian/Ubuntu (for older Debian/Ubuntu versions, you may have to use libvirt-bin instead of libvirt-clients and libvirt-daemon-system)
$ sudo apt install libvirt-clients libvirt-daemon-system qemu-kvm
sudo apt install libvirt-clients libvirt-daemon-system qemu-kvm
# Fedora/CentOS/RHEL
$ sudo yum install libvirt-daemon-kvm qemu-kvm
sudo yum install libvirt-daemon-kvm qemu-kvm
# Add yourself to the libvirt group so you don't need to sudo
# NOTE: For older Debian/Ubuntu versions change the group to `libvirtd`
$ sudo usermod -a -G libvirt $(whoami)
sudo usermod -a -G libvirt $(whoami)
# Update your current session for the group change to take effect
# NOTE: For older Debian/Ubuntu versions change the group to `libvirtd`
$ newgrp libvirt
newgrp libvirt
```
Then install the driver itself:
@ -59,17 +59,17 @@ After following the instructions on the KVM driver releases page, you need to ma
```shell
# Install libvirt and qemu-kvm on your system, e.g.
# Debian/Ubuntu (for older Debian/Ubuntu versions, you may have to use libvirt-bin instead of libvirt-clients and libvirt-daemon-system)
$ sudo apt install libvirt-clients libvirt-daemon-system qemu-kvm
sudo apt install libvirt-clients libvirt-daemon-system qemu-kvm
# Fedora/CentOS/RHEL
$ sudo yum install libvirt-daemon-kvm qemu-kvm
sudo yum install libvirt-daemon-kvm qemu-kvm
# Add yourself to the libvirt group so you don't need to sudo
# NOTE: For older Debian/Ubuntu versions change the group to `libvirtd`
$ sudo usermod -a -G libvirt $(whoami)
sudo usermod -a -G libvirt $(whoami)
# Update your current session for the group change to take effect
# NOTE: For older Debian/Ubuntu versions change the group to `libvirtd`
$ newgrp libvirt
newgrp libvirt
```
To use the driver you would do:
@ -96,18 +96,18 @@ If you encountered errors like `Could not find hyperkit executable`, you might n
If you are using [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) in your setup and cluster creation fails (stuck at kube-dns initialization) you might need to add `listen-address=192.168.64.1` to `dnsmasq.conf`.
*Note: If `dnsmasq.conf` contains `listen-address=127.0.0.1` kubernetes discovers dns at 127.0.0.1:53 and tries to use it using bridge ip address, but dnsmasq replies only to reqests from 127.0.0.1*
*Note: If `dnsmasq.conf` contains `listen-address=127.0.0.1` kubernetes discovers dns at 127.0.0.1:53 and tries to use it using bridge ip address, but dnsmasq replies only to requests from 127.0.0.1*
#### xhyve driver
From https://github.com/zchee/docker-machine-driver-xhyve#install:
```shell
$ brew install docker-machine-driver-xhyve
brew install docker-machine-driver-xhyve
# docker-machine-driver-xhyve need root owner and uid
$ sudo chown root:wheel $(brew --prefix)/opt/docker-machine-driver-xhyve/bin/docker-machine-driver-xhyve
$ sudo chmod u+s $(brew --prefix)/opt/docker-machine-driver-xhyve/bin/docker-machine-driver-xhyve
sudo chown root:wheel $(brew --prefix)/opt/docker-machine-driver-xhyve/bin/docker-machine-driver-xhyve
sudo chmod u+s $(brew --prefix)/opt/docker-machine-driver-xhyve/bin/docker-machine-driver-xhyve
```
#### HyperV driver

View File

@ -136,7 +136,7 @@ for service in services:
sleep
```
Note that the Minikube ClusterIP can change over time (during system reboots) and this loop should also handle reconcilliation of those changes.
Note that the Minikube ClusterIP can change over time (during system reboots) and this loop should also handle reconciliation of those changes.
## Handling multiple clusters

View File

@ -188,10 +188,11 @@ ${SUDO_PREFIX} rm -f "${KUBECONFIG}"
rmdir "${TEST_HOME}"
echo ">> ${TEST_HOME} completed at $(date)"
readonly target_url="https://storage.googleapis.com/minikube-builds/logs/${MINIKUBE_LOCATION}/${JOB_NAME}.txt"
curl "https://api.github.com/repos/kubernetes/minikube/statuses/${COMMIT}?access_token=$access_token" \
if [[ "${MINIKUBE_LOCATION}" != "master" ]]; then
readonly target_url="https://storage.googleapis.com/minikube-builds/logs/${MINIKUBE_LOCATION}/${JOB_NAME}.txt"
curl "https://api.github.com/repos/kubernetes/minikube/statuses/${COMMIT}?access_token=$access_token" \
-H "Content-Type: application/json" \
-X POST \
-d "{\"state\": \"$status\", \"description\": \"Jenkins\", \"target_url\": \"$target_url\", \"context\": \"${JOB_NAME}\"}"
fi
exit $result

View File

@ -21,31 +21,35 @@
# ghprbPullId: The pull request ID, injected from the ghpbr plugin.
# ghprbActualCommit: The commit hash, injected from the ghpbr plugin.
set -e
set -eux -o pipefail
readonly bucket="minikube-builds"
declare -rx BUILD_IN_DOCKER=y
declare -rx GOPATH=/var/lib/jenkins/go
declare -rx ISO_BUCKET="${bucket}/${ghprbPullId}"
declare -rx ISO_VERSION="testing"
declare -rx TAG="${ghprbActualCommit}"
export BUILD_IN_DOCKER=y
export TAG=$ghprbActualCommit
export GOPATH=/var/lib/jenkins/go
docker kill $(docker ps -q) || true
docker rm $(docker ps -aq) || true
set +e
make -j 16 all
set -e
make -j 16 all && failed=$? || failed=$?
make_result="$?"
gsutil cp gs://minikube-builds/logs/index.html gs://minikube-builds/logs/${ghprbPullId}/index.html
gsutil cp "gs://${bucket}/logs/index.html" \
"gs://${bucket}/logs/${ghprbPullId}/index.html"
# Exit if the cross build failed.
if [ "$make_result" -ne 0 ]; then echo "cross build failed"; exit 1; fi
if [[ "${failed}" -ne 0 ]]; then
echo "build failed"
exit "${failed}"
fi
# If there are ISO changes, build and upload the ISO
# then set the default to the newly built ISO for testing
if out="$(git diff ${ghprbActualCommit} --name-only $(git merge-base origin/master ${ghprbActualCommit}) | grep deploy/iso/minikube)" &> /dev/null; then
git diff ${ghprbActualCommit} --name-only \
$(git merge-base origin/master ${ghprbActualCommit}) \
| grep -q deploy/iso/minikube && rebuild=1 || rebuild=0
if [[ "${rebuild}" -eq 1 ]]; then
echo "ISO changes detected ... rebuilding ISO"
export ISO_BUCKET="minikube-builds/${ghprbPullId}"
export ISO_VERSION="testing"
make release-iso
fi
@ -54,5 +58,4 @@ cp -r test/integration/testdata out/
# Don't upload the buildroot artifacts if they exist
rm -r out/buildroot || true
# Upload everything we built to Cloud Storage.
gsutil -m cp -r out/* gs://minikube-builds/${ghprbPullId}/
gsutil -m cp -r out/* "gs://${bucket}/${ghprbPullId}/"

View File

@ -27,7 +27,12 @@
set -e
set +x
for job in "Minishift-Linux-KVM" "OSX-Virtualbox" "OSX-Hyperkit" "Linux-Virtualbox" "Linux-KVM" "Linux-None" "Windows-Virtualbox" "Windows-Kubeadm-CRI-O" "Linux-Container"; do
if [ "${ghprbPullId}" == "master" ]; then
echo "not setting github status for continuous builds"
exit 0
fi
for job in "OSX-Virtualbox" "OSX-Hyperkit" "Linux-Virtualbox" "Linux-KVM" "Linux-None"; do
target_url="https://storage.googleapis.com/minikube-builds/logs/${ghprbPullId}/${job}.txt"
curl "https://api.github.com/repos/kubernetes/minikube/statuses/${ghprbActualCommit}?access_token=$access_token" \
-H "Content-Type: application/json" \

View File

@ -40,7 +40,8 @@ var dockerkillcmd = fmt.Sprintf(`docker rm $(%s)`, dockerstopcmd)
type Driver struct {
*drivers.BaseDriver
*pkgdrivers.CommonDriver
URL string
URL string
ContainerRuntime string
}
func NewDriver(hostName, storePath string) *Driver {
@ -54,11 +55,13 @@ func NewDriver(hostName, storePath string) *Driver {
// PreCreateCheck checks for correct privileges and dependencies
func (d *Driver) PreCreateCheck() error {
// check that docker is on path
_, err := exec.LookPath("docker")
if err != nil {
return errors.Wrap(err, "docker cannot be found on the path for this machine. "+
"A docker installation is a requirement for using the none driver")
if d.ContainerRuntime == "" {
// check that docker is on path
_, err := exec.LookPath("docker")
if err != nil {
return errors.Wrap(err, "docker cannot be found on the path for this machine. "+
"A docker installation is a requirement for using the none driver")
}
}
return nil
@ -136,11 +139,16 @@ func (d *Driver) Remove() error {
sudo rm -rf /etc/kubernetes/manifests
sudo rm -rf /var/lib/minikube || true`
for _, cmdStr := range []string{rmCmd, dockerkillcmd} {
for _, cmdStr := range []string{rmCmd} {
if out, err := runCommand(cmdStr, true); err != nil {
glog.Warningf("Error %v running command: %s, Output: %s", err, cmdStr, out)
}
}
if d.ContainerRuntime == "" {
if out, err := runCommand(dockerkillcmd, true); err != nil {
glog.Warningf("Error %v running command: %s, Output: %s", err, dockerkillcmd, out)
}
}
return nil
}
@ -191,8 +199,10 @@ fi
break
}
}
if out, err := runCommand(dockerstopcmd, false); err != nil {
glog.Warningf("Error %v running command %s. Output: %s", err, dockerstopcmd, out)
if d.ContainerRuntime == "" {
if out, err := runCommand(dockerstopcmd, false); err != nil {
glog.Warningf("Error %v running command %s. Output: %s", err, dockerstopcmd, out)
}
}
return nil
}

View File

@ -104,7 +104,7 @@ func (f *FakeCommandRunner) SetCommandToOutput(cmdToOutput map[string]string) {
}
}
// SetFileToContents stores the file to contents map for the FakeCommandRunner
// GetFileToContents returns the stored contents for the given filename from the FakeCommandRunner
func (f *FakeCommandRunner) GetFileToContents(filename string) (string, error) {
contents, ok := f.fileMap.Load(filename)
if !ok {

View File

@ -115,6 +115,10 @@ func (k *KubeadmBootstrapper) StartCluster(k8s config.KubernetesConfig) error {
}
b := bytes.Buffer{}
preflights := constants.Preflights
if k8s.ContainerRuntime != "" {
preflights = constants.AlternateRuntimePreflights
}
templateContext := struct {
KubeadmConfigFile string
SkipPreflightChecks bool
@ -125,7 +129,7 @@ func (k *KubeadmBootstrapper) StartCluster(k8s config.KubernetesConfig) error {
SkipPreflightChecks: !VersionIsBetween(version,
semver.MustParse("1.9.0-alpha.0"),
semver.Version{}),
Preflights: constants.Preflights,
Preflights: preflights,
DNSAddon: "kube-dns",
}
if version.GTE(semver.MustParse("1.12.0")) {
@ -239,6 +243,24 @@ func SetContainerRuntime(cfg map[string]string, runtime string) map[string]strin
return cfg
}
// GetCRISocket returns the CRI socket path to use: the explicitly provided
// path when non-empty, otherwise a well-known default derived from the
// container runtime name ("" when the runtime has no known socket).
func GetCRISocket(path string, runtime string) string {
	if path != "" {
		glog.Infoln("Container runtime interface socket provided, using path.")
		return path
	}
	switch runtime {
	case "crio", "cri-o":
		return "/var/run/crio/crio.sock"
	case "containerd":
		return "/run/containerd/containerd.sock"
	default:
		return ""
	}
}
// NewKubeletConfig generates a new systemd unit containing a configured kubelet
// based on the options present in the KubernetesConfig.
func NewKubeletConfig(k8s config.KubernetesConfig) (string, error) {
@ -352,6 +374,8 @@ func generateConfig(k8s config.KubernetesConfig) (string, error) {
return "", errors.Wrap(err, "parsing kubernetes version")
}
criSocket := GetCRISocket(k8s.CRISocket, k8s.ContainerRuntime)
// parses a map of the feature gates for kubeadm and component
kubeadmFeatureArgs, componentFeatureArgs, err := ParseFeatureArgs(k8s.FeatureGates)
if err != nil {
@ -372,6 +396,7 @@ func generateConfig(k8s config.KubernetesConfig) (string, error) {
KubernetesVersion string
EtcdDataDir string
NodeName string
CRISocket string
ExtraArgs []ComponentExtraArgs
FeatureArgs map[string]bool
NoTaintMaster bool
@ -383,6 +408,7 @@ func generateConfig(k8s config.KubernetesConfig) (string, error) {
KubernetesVersion: k8s.KubernetesVersion,
EtcdDataDir: "/data/minikube", //TODO(r2d4): change to something else persisted
NodeName: k8s.NodeName,
CRISocket: criSocket,
ExtraArgs: extraComponentConfig,
FeatureArgs: kubeadmFeatureArgs,
NoTaintMaster: false, // That does not work with k8s 1.12+

View File

@ -38,7 +38,8 @@ networking:
etcd:
dataDir: {{.EtcdDataDir}}
nodeName: {{.NodeName}}
{{range .ExtraArgs}}{{.Component}}:{{range $i, $val := printMapInOrder .Options ": " }}
{{if .CRISocket}}criSocket: {{.CRISocket}}
{{end}}{{range .ExtraArgs}}{{.Component}}:{{range $i, $val := printMapInOrder .Options ": " }}
{{$val}}{{end}}
{{end}}{{if .FeatureArgs}}featureGates: {{range $i, $val := .FeatureArgs}}
{{$i}}: {{$val}}{{end}}

View File

@ -20,6 +20,7 @@ import (
"bytes"
"encoding/json"
"html/template"
"net"
"strings"
"github.com/golang/glog"
@ -33,6 +34,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/service"
"k8s.io/minikube/pkg/util"
)
@ -94,7 +96,7 @@ func unmarkMaster() error {
// cluster admin privileges to work with RBAC.
func elevateKubeSystemPrivileges() error {
k8s := service.K8s
client, err := k8s.GetClientset()
client, err := k8s.GetClientset(constants.DefaultK8sClientTimeout)
if err != nil {
return errors.Wrap(err, "getting clientset")
}
@ -121,6 +123,10 @@ func elevateKubeSystemPrivileges() error {
}
_, err = client.RbacV1beta1().ClusterRoleBindings().Create(clusterRoleBinding)
if err != nil {
netErr, ok := err.(net.Error)
if ok && netErr.Timeout() {
return &util.RetriableError{Err: errors.Wrap(err, "creating clusterrolebinding")}
}
return errors.Wrap(err, "creating clusterrolebinding")
}
return nil

View File

@ -86,7 +86,7 @@ func (s *SSHRunner) CombinedOutput(cmd string) (string, error) {
b, err := sess.CombinedOutput(cmd)
if err != nil {
return "", errors.Wrapf(err, "running command: %s\n.", cmd)
return "", errors.Wrapf(err, "running command: %s\n, output: %s", cmd, string(b))
}
return string(b), nil
}

View File

@ -35,6 +35,7 @@ type MachineConfig struct {
CPUs int
DiskSize int
VMDriver string
ContainerRuntime string
HyperkitVpnKitSock string // Only used by the Hyperkit driver
HyperkitVSockPorts []string // Only used by the Hyperkit driver
XhyveDiskDriver string // Only used by the xhyve driver
@ -63,6 +64,7 @@ type KubernetesConfig struct {
APIServerIPs []net.IP
DNSDomain string
ContainerRuntime string
CRISocket string
NetworkPlugin string
FeatureGates string
ServiceCIDR string

View File

@ -20,6 +20,7 @@ import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/blang/semver"
@ -27,6 +28,7 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
minikubeVersion "k8s.io/minikube/pkg/version"
"time"
)
// APIServerPort is the port that the API server should listen on.
@ -116,6 +118,7 @@ const (
GithubMinikubeReleasesURL = "https://storage.googleapis.com/minikube/releases.json"
DefaultWait = 20
DefaultInterval = 6
DefaultK8sClientTimeout = 60 * time.Second
DefaultClusterBootstrapper = "kubeadm"
)
@ -164,6 +167,23 @@ var Preflights = []string{
"CRI",
}
// AlternateRuntimePreflights are additional preflight checks that are skipped when running
// any container runtime that isn't Docker
var AlternateRuntimePreflights = append(Preflights, []string{
"Service-Docker",
"Port-8443",
"Port-10251",
"Port-10252",
"Port-2379",
}...)
const (
ContainerdRuntime = "containerd"
RktRuntime = "rkt"
CrioRuntime = "crio"
Cri_oRuntime = "cri-o"
)
const (
DefaultUfsPort = "5640"
DefaultUfsDebugLvl = 0
@ -173,7 +193,7 @@ const (
)
func GetKubernetesReleaseURL(binaryName, version string) string {
return fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/%s/bin/linux/amd64/%s", version, binaryName)
return fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/%s/bin/linux/%s/%s", version, runtime.GOARCH, binaryName)
}
func GetKubernetesReleaseURLSha1(binaryName, version string) string {

View File

@ -43,5 +43,6 @@ func createNoneHost(config cfg.MachineConfig) interface{} {
MachineName: cfg.GetMachineName(),
StorePath: constants.GetMinipath(),
},
ContainerRuntime: config.ContainerRuntime,
}
}

View File

@ -118,7 +118,7 @@ func getLatestVersionFromURL(url string) (semver.Version, error) {
func GetAllVersionsFromURL(url string) (Releases, error) {
var releases Releases
glog.Infof("Checking for updates...")
glog.Info("Checking for updates...")
if err := getJson(url, &releases); err != nil {
return releases, errors.Wrap(err, "Error getting json from minikube version url")
}

View File

@ -42,12 +42,13 @@ import (
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/util"
)
type K8sClient interface {
GetCoreClient() (corev1.CoreV1Interface, error)
GetClientset() (*kubernetes.Clientset, error)
GetClientset(timeout time.Duration) (*kubernetes.Clientset, error)
}
type K8sClientGetter struct{}
@ -59,14 +60,14 @@ func init() {
}
func (k *K8sClientGetter) GetCoreClient() (corev1.CoreV1Interface, error) {
client, err := k.GetClientset()
client, err := k.GetClientset(constants.DefaultK8sClientTimeout)
if err != nil {
return nil, errors.Wrap(err, "getting clientset")
}
return client.Core(), nil
}
func (*K8sClientGetter) GetClientset() (*kubernetes.Clientset, error) {
func (*K8sClientGetter) GetClientset(timeout time.Duration) (*kubernetes.Clientset, error) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
profile := viper.GetString(config.MachineProfile)
configOverrides := &clientcmd.ConfigOverrides{
@ -80,7 +81,7 @@ func (*K8sClientGetter) GetClientset() (*kubernetes.Clientset, error) {
if err != nil {
return nil, fmt.Errorf("Error creating kubeConfig: %v", err)
}
clientConfig.Timeout = 1 * time.Second
clientConfig.Timeout = timeout
client, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, errors.Wrap(err, "Error creating new client from kubeConfig.ClientConfig()")

View File

@ -32,6 +32,7 @@ import (
"k8s.io/client-go/kubernetes/typed/core/v1/fake"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/tests"
"time"
)
type MockClientGetter struct {
@ -44,7 +45,7 @@ func (m *MockClientGetter) GetCoreClient() (corev1.CoreV1Interface, error) {
}, nil
}
func (m *MockClientGetter) GetClientset() (*kubernetes.Clientset, error) {
func (m *MockClientGetter) GetClientset(timeout time.Duration) (*kubernetes.Clientset, error) {
return nil, nil
}

View File

@ -73,7 +73,7 @@ func (m *clusterInspector) getStateAndRoute() (HostState, *Route, error) {
var route *Route
route, err = getRoute(h, c)
if err != nil {
err = errors.Wrapf(err, "error getting Route info for %s", m.machineName)
err = errors.Wrapf(err, "error getting route info for %s", m.machineName)
return hostState, nil, err
}
return hostState, route, nil

View File

@ -68,7 +68,7 @@ func TestReporter(t *testing.T) {
MinikubeState: Unknown,
MinikubeError: errors.New("minikubeerror"),
RouteError: errors.New("routeerror"),
RouteError: errors.New("route error"),
PatchedServices: nil,
LoadBalancerEmulatorError: errors.New("lberror"),
@ -81,7 +81,7 @@ func TestReporter(t *testing.T) {
services: []
errors:
minikube: minikubeerror
router: routeerror
router: route error
loadbalancer emulator: lberror
`,
},

View File

@ -38,7 +38,7 @@ func (router *osRouter) EnsureRouteIsAdded(route *Route) error {
serviceCIDR := route.DestCIDR.String()
gatewayIP := route.Gateway.String()
glog.Infof("Adding Route for CIDR %s to gateway %s", serviceCIDR, gatewayIP)
glog.Infof("Adding route for CIDR %s to gateway %s", serviceCIDR, gatewayIP)
command := exec.Command("sudo", "route", "-n", "add", serviceCIDR, gatewayIP)
glog.Infof("About to run command: %s", command.Args)
stdInAndOut, err := command.CombinedOutput()
@ -48,10 +48,8 @@ func (router *osRouter) EnsureRouteIsAdded(route *Route) error {
return fmt.Errorf("error adding Route: %s, %d", message, len(strings.Split(message, "\n")))
}
glog.Infof("%s", stdInAndOut)
if err != nil {
return err
}
return nil
return err
}
func (router *osRouter) Inspect(route *Route) (exists bool, conflict string, overlaps []string, err error) {

View File

@ -93,7 +93,7 @@ func addRoute(t *testing.T, cidr string, gw string) {
command := exec.Command("sudo", "route", "-n", "add", cidr, gw)
_, err := command.CombinedOutput()
if err != nil {
t.Logf("add Route error (should be ok): %s", err)
t.Logf("add route error (should be ok): %s", err)
}
}

View File

@ -37,7 +37,7 @@ func (router *osRouter) EnsureRouteIsAdded(route *Route) error {
serviceCIDR := route.DestCIDR.String()
gatewayIP := route.Gateway.String()
glog.Infof("Adding Route for CIDR %s to gateway %s", serviceCIDR, gatewayIP)
glog.Infof("Adding route for CIDR %s to gateway %s", serviceCIDR, gatewayIP)
command := exec.Command("sudo", "ip", "route", "add", serviceCIDR, "via", gatewayIP)
glog.Infof("About to run command: %s", command.Args)
stdInAndOut, err := command.CombinedOutput()
@ -124,7 +124,7 @@ func (router *osRouter) Cleanup(route *Route) error {
serviceCIDR := route.DestCIDR.String()
gatewayIP := route.Gateway.String()
glog.Infof("Cleaning up Route for CIDR %s to gateway %s\n", serviceCIDR, gatewayIP)
glog.Infof("Cleaning up route for CIDR %s to gateway %s\n", serviceCIDR, gatewayIP)
command := exec.Command("sudo", "ip", "route", "delete", serviceCIDR)
stdInAndOut, err := command.CombinedOutput()
message := fmt.Sprintf("%s", stdInAndOut)

View File

@ -118,7 +118,7 @@ func addRoute(t *testing.T, cidr string, gw string) {
command := exec.Command("sudo", "ip", "route", "add", cidr, "via", gw)
sout, err := command.CombinedOutput()
if err != nil {
t.Logf("assertion add Route error (should be ok): %s, error: %s", sout, err)
t.Logf("assertion add route error (should be ok): %s, error: %s", sout, err)
} else {
t.Logf("assertion - successfully added %s -> %s", cidr, gw)
}

View File

@ -44,7 +44,7 @@ func (router *osRouter) EnsureRouteIsAdded(route *Route) error {
gatewayIP := route.Gateway.String()
glog.Infof("Adding Route for CIDR %s to gateway %s", serviceCIDR, gatewayIP)
glog.Infof("Adding route for CIDR %s to gateway %s", serviceCIDR, gatewayIP)
command := exec.Command("route", "ADD", destinationIP, "MASK", destinationMask, gatewayIP)
glog.Infof("About to run command: %s", command.Args)
stdInAndOut, err := command.CombinedOutput()
@ -127,7 +127,7 @@ func (router *osRouter) Cleanup(route *Route) error {
serviceCIDR := route.DestCIDR.String()
gatewayIP := route.Gateway.String()
glog.Infof("Cleaning up Route for CIDR %s to gateway %s\n", serviceCIDR, gatewayIP)
glog.Infof("Cleaning up route for CIDR %s to gateway %s\n", serviceCIDR, gatewayIP)
command := exec.Command("route", "delete", serviceCIDR)
stdInAndOut, err := command.CombinedOutput()
if err != nil {

View File

@ -43,7 +43,7 @@ func TestWindowsRouteFailsOnConflictIntegrationTest(t *testing.T) {
if err == nil {
t.Errorf("add should have error, but it is nil")
} else if !strings.Contains(err.Error(), "conflict") {
t.Errorf("expected to fail with error containg `conflict`, but failed with wrong error %s", err)
t.Errorf("expected to fail with error containing `conflict`, but failed with wrong error %s", err)
}
cleanRoute(t, "10.96.0.0")
}
@ -156,7 +156,7 @@ func addRoute(t *testing.T, dstIP string, dstMask string, gw string) {
command := exec.Command("route", "ADD", dstIP, "mask", dstMask, gw)
sout, err := command.CombinedOutput()
if err != nil {
t.Logf("assertion add Route error (should be ok): %s, error: %s", sout, err)
t.Logf("assertion add route error (should be ok): %s, error: %s", sout, err)
} else {
t.Logf("assertion - successfully added %s (%s) -> %s", dstIP, dstMask, gw)
}

View File

@ -20,7 +20,11 @@ import (
"fmt"
"os"
"os/exec"
"regexp"
"github.com/docker/machine/libmachine"
"github.com/docker/machine/libmachine/host"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/client-go/kubernetes/typed/core/v1"
@ -113,11 +117,12 @@ func (t *tunnel) cleanup() *Status {
func (t *tunnel) update() *Status {
glog.V(3).Info("updating tunnel status...")
t.status.MinikubeState, _, t.status.MinikubeError = t.clusterInspector.getStateAndHost()
var h *host.Host
t.status.MinikubeState, h, t.status.MinikubeError = t.clusterInspector.getStateAndHost()
defer t.clusterInspector.machineAPI.Close()
if t.status.MinikubeState == Running {
glog.V(3).Infof("minikube is running, trying to add Route %s", t.status.TunnelID.Route)
setupRoute(t)
glog.V(3).Infof("minikube is running, trying to add route %s", t.status.TunnelID.Route)
setupRoute(t, h)
if t.status.RouteError == nil {
t.status.PatchedServices, t.status.LoadBalancerEmulatorError = t.loadBalancerEmulator.PatchServices()
}
@ -127,7 +132,7 @@ func (t *tunnel) update() *Status {
return t.status
}
func setupRoute(t *tunnel) {
func setupRoute(t *tunnel, h *host.Host) {
exists, conflict, _, err := t.router.Inspect(t.status.TunnelID.Route)
if err != nil {
t.status.RouteError = fmt.Errorf("error checking for route state: %s", err)
@ -136,17 +141,57 @@ func setupRoute(t *tunnel) {
if !exists && len(conflict) == 0 {
t.status.RouteError = t.router.EnsureRouteIsAdded(t.status.TunnelID.Route)
if t.status.RouteError == nil {
//the route was added successfully, we need to make sure the registry has it too
//this might fail in race conditions, when another process created this tunnel
if err := t.registry.Register(&t.status.TunnelID); err != nil {
glog.Errorf("failed to register tunnel: %s", err)
t.status.RouteError = err
if t.status.RouteError != nil {
return
}
//the route was added successfully, we need to make sure the registry has it too
//this might fail in race conditions, when another process created this tunnel
if err := t.registry.Register(&t.status.TunnelID); err != nil {
glog.Errorf("failed to register tunnel: %s", err)
t.status.RouteError = err
return
}
if h.DriverName == "hyperkit" {
//the virtio-net interface acts up with ip tunnels :(
command := exec.Command("ifconfig", "bridge100")
glog.Infof("About to run command: %s\n", command.Args)
response, err := command.CombinedOutput()
if err != nil {
t.status.RouteError = fmt.Errorf("running %v: %v", command.Args, err)
return
}
iface := string(response)
pattern := regexp.MustCompile(`.*member: (en\d+) flags=.*`)
submatch := pattern.FindStringSubmatch(iface)
if len(submatch) != 2 {
t.status.RouteError = fmt.Errorf("couldn't find member in bridge100 interface: %s", iface)
return
}
member := submatch[1]
command = exec.Command("sudo", "ifconfig", "bridge100", "deletem", member)
glog.Infof("About to run command: %s\n", command.Args)
response, err = command.CombinedOutput()
glog.Info(string(response))
if err != nil {
t.status.RouteError = fmt.Errorf("couldn't remove member %s: %s", member, err)
return
}
command = exec.Command("sudo", "ifconfig", "bridge100", "addm", member)
glog.Infof("About to run command: %s\n", command.Args)
response, err = command.CombinedOutput()
glog.Info(string(response))
if err != nil {
t.status.RouteError = fmt.Errorf("couldn't re-add member %s: %s", member, err)
return
}
}
return
}
// error scenarios
if len(conflict) > 0 {
t.status.RouteError = fmt.Errorf("conflicting route: %s", conflict)
return
@ -172,6 +217,7 @@ func setupRoute(t *tunnel) {
if existingTunnel.Pid != getPid() {
//another running process owns the tunnel
t.status.RouteError = errorTunnelAlreadyExists(existingTunnel)
return
}
}

View File

@ -75,7 +75,7 @@ func (mgr *Manager) startTunnel(ctx context.Context, tunnel controller) (done ch
func (mgr *Manager) timerLoop(ready, check chan bool) {
for {
glog.V(4).Infof("waiting for tunnel to be ready for next check")
glog.V(4).Info("waiting for tunnel to be ready for next check")
<-ready
glog.V(4).Infof("sleep for %s", mgr.delay)
time.Sleep(mgr.delay)

View File

@ -197,7 +197,7 @@ func TestTunnel(t *testing.T) {
substring := "testerror"
if actualSecondState.RouteError == nil || !strings.Contains(actualSecondState.RouteError.Error(), substring) {
t.Errorf("wrong tunnel status. expected Route error to contain '%s' \ngot: %s", substring, actualSecondState.RouteError)
t.Errorf("wrong tunnel status. expected route error to contain '%s' \ngot: %s", substring, actualSecondState.RouteError)
}
expectedRoutes := []*Route{expectedRoute}

View File

@ -70,8 +70,8 @@ func (p *BuildrootProvisioner) GenerateDockerOptions(dockerPort int) (*provision
engineConfigTmpl := `[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket
After=network.target minikube-automount.service docker.socket
Requires=minikube-automount.service docker.socket
[Service]
Type=notify

View File

@ -211,7 +211,7 @@ func WaitForService(c kubernetes.Interface, namespace, name string, exist bool,
glog.Infof("Service %s in namespace %s disappeared.", name, namespace)
return !exist, nil
case !IsRetryableAPIError(err):
glog.Infof("Non-retryable failure while getting service.")
glog.Info("Non-retryable failure while getting service.")
return false, err
default:
glog.Infof("Get service %s in namespace %s failed: %v", name, namespace, err)

View File

@ -19,6 +19,7 @@ limitations under the License.
package integration
import (
"bufio"
"fmt"
"io/ioutil"
"net"
@ -46,19 +47,42 @@ func testAddons(t *testing.T) {
}
}
func readLineWithTimeout(b *bufio.Reader, timeout time.Duration) (string, error) {
s := make(chan string)
e := make(chan error)
go func() {
read, err := b.ReadString('\n')
if err != nil {
e <- err
} else {
s <- read
}
close(s)
close(e)
}()
select {
case line := <-s:
return line, nil
case err := <-e:
return "", err
case <-time.After(timeout):
return "", fmt.Errorf("timeout after %s", timeout)
}
}
func testDashboard(t *testing.T) {
t.Parallel()
minikubeRunner := NewMinikubeRunner(t)
cmd, out := minikubeRunner.RunDaemon("dashboard --url")
defer func() {
err := cmd.Process.Kill()
if err != nil {
t.Logf("Failed to kill mount command: %v", err)
t.Logf("Failed to kill dashboard command: %v", err)
}
}()
s, err := out.ReadString('\n')
s, err := readLineWithTimeout(out, 180*time.Second)
if err != nil {
t.Fatalf("failed to read url: %v", err)
}

View File

@ -19,36 +19,53 @@ limitations under the License.
package integration
import (
"context"
"fmt"
"strings"
"testing"
"time"
)
func TestDocker(t *testing.T) {
minikubeRunner := NewMinikubeRunner(t)
if strings.Contains(minikubeRunner.StartArgs, "--vm-driver=none") {
mk := NewMinikubeRunner(t)
if strings.Contains(mk.StartArgs, "--vm-driver=none") {
t.Skip("skipping test as none driver does not bundle docker")
}
minikubeRunner.RunCommand("delete", false)
startCmd := fmt.Sprintf("start %s %s %s", minikubeRunner.StartArgs, minikubeRunner.Args, "--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true")
minikubeRunner.RunCommand(startCmd, true)
minikubeRunner.EnsureRunning()
// Start a timer for all remaining commands, to display failure output before a panic.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
// Pre-cleanup: this usually fails, because no instance is running.
mk.RunWithContext(ctx, "delete")
startCmd := fmt.Sprintf("start %s %s %s", mk.StartArgs, mk.Args,
"--docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true")
out, err := mk.RunWithContext(ctx, startCmd)
if err != nil {
t.Fatalf("start: %v\nstart out: %s", err, out)
}
mk.EnsureRunning()
out, err = mk.RunWithContext(ctx, "ssh -- systemctl show docker --property=Environment --no-pager")
if err != nil {
t.Errorf("docker env: %v\ndocker env out: %s", err, out)
}
dockerdEnvironment := minikubeRunner.RunCommand("ssh -- systemctl show docker --property=Environment --no-pager", true)
fmt.Println(dockerdEnvironment)
for _, envVar := range []string{"FOO=BAR", "BAZ=BAT"} {
if !strings.Contains(dockerdEnvironment, envVar) {
t.Fatalf("Env var %s missing from Environment: %s.", envVar, dockerdEnvironment)
if !strings.Contains(string(out), envVar) {
t.Errorf("Env var %s missing: %s.", envVar, out)
}
}
dockerdExecStart := minikubeRunner.RunCommand("ssh -- systemctl show docker --property=ExecStart --no-pager", true)
fmt.Println(dockerdExecStart)
out, err = mk.RunWithContext(ctx, "ssh -- systemctl show docker --property=ExecStart --no-pager")
if err != nil {
t.Errorf("ssh show docker: %v\nshow docker out: %s", err, out)
}
for _, opt := range []string{"--debug", "--icc=true"} {
if !strings.Contains(dockerdExecStart, opt) {
t.Fatalf("Option %s missing from ExecStart: %s.", opt, dockerdExecStart)
if !strings.Contains(string(out), opt) {
t.Fatalf("Option %s missing from ExecStart: %s.", opt, out)
}
}
}

View File

@ -34,13 +34,14 @@ import (
)
func testMounting(t *testing.T) {
t.Parallel()
if runtime.GOOS == "darwin" {
t.Skip("mount tests disabled in darwin due to timeout (issue#3200)")
}
if strings.Contains(*args, "--vm-driver=none") {
t.Skip("skipping test for none driver as it does not need mount")
}
t.Parallel()
minikubeRunner := NewMinikubeRunner(t)
tempDir, err := ioutil.TempDir("", "mounttest")

View File

@ -20,7 +20,9 @@ import (
"fmt"
"io/ioutil"
"net/http"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
@ -33,11 +35,20 @@ import (
)
func testTunnel(t *testing.T) {
if runtime.GOOS != "windows" {
// Otherwise minikube fails waiting for a password.
if err := exec.Command("sudo", "-n", "route").Run(); err != nil {
t.Skipf("password required to execute 'route', skipping testTunnel: %v", err)
}
}
t.Log("starting tunnel test...")
runner := NewMinikubeRunner(t)
go func() {
output := runner.RunCommand("tunnel --alsologtostderr -v 8", true)
fmt.Println(output)
output := runner.RunCommand("tunnel --alsologtostderr -v 8 --logtostderr", true)
if t.Failed() {
fmt.Println(output)
}
}()
err := tunnel.NewManager().CleanupNotRunningTunnels()
@ -88,10 +99,21 @@ func testTunnel(t *testing.T) {
}
httpClient := http.DefaultClient
httpClient.Timeout = 1 * time.Second
resp, err := httpClient.Get(fmt.Sprintf("http://%s", nginxIP))
httpClient.Timeout = 5 * time.Second
if err != nil {
var resp *http.Response
request := func() error {
resp, err = httpClient.Get(fmt.Sprintf("http://%s", nginxIP))
if err != nil {
retriable := &commonutil.RetriableError{Err: err}
t.Log(retriable)
return retriable
}
return nil
}
if err = commonutil.RetryAfter(5, request, 1*time.Second); err != nil {
t.Fatalf("error reading from nginx at address(%s): %s", nginxIP, err)
}

View File

@ -19,6 +19,7 @@ package util
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"math/rand"
@ -30,7 +31,6 @@ import (
"time"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/minikube/pkg/minikube/assets"
commonutil "k8s.io/minikube/pkg/util"
@ -81,6 +81,13 @@ func (m *MinikubeRunner) RunCommand(command string, checkError bool) string {
return string(stdout)
}
// RunWithContext calls the minikube command with a context, useful for timeouts.
func (m *MinikubeRunner) RunWithContext(ctx context.Context, command string) ([]byte, error) {
commandArr := strings.Split(command, " ")
path, _ := filepath.Abs(m.BinaryPath)
return exec.CommandContext(ctx, path, commandArr...).CombinedOutput()
}
func (m *MinikubeRunner) RunDaemon(command string) (*exec.Cmd, *bufio.Reader) {
commandArr := strings.Split(command, " ")
path, _ := filepath.Abs(m.BinaryPath)
@ -183,8 +190,9 @@ func (k *KubectlRunner) RunCommand(args []string) (stdout []byte, err error) {
cmd := exec.Command(k.BinaryPath, args...)
stdout, err = cmd.CombinedOutput()
if err != nil {
k.T.Logf("Error %s running command %s. Return code: %v", stdout, args, err)
return &commonutil.RetriableError{Err: fmt.Errorf("Error running command: %v. Output: %s", err, stdout)}
retriable := &commonutil.RetriableError{Err: fmt.Errorf("error running command %s: %v. Stdout: \n %s", args, err, stdout)}
k.T.Log(retriable)
return retriable
}
return nil
}