Merge branch 'master' into mdlint

pull/3840/head
Thomas Strömberg 2019-03-20 10:18:22 -07:00 committed by GitHub
commit 667c88d8e1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 107 additions and 89 deletions

View File

@ -1,8 +1,5 @@
<!-- If you are reporting a bug, please include:
- How to replicate the error, including the exact `minikube` command-line used.
- The full output of the command that failed
- The output of the "minikube logs" command
- Which operating system version was used
Thank you! Please write about your experience below this line: --!>
<!-- Thank you for sharing your experience! If you are reporting a bug, please include: -->
<!-- * The exact command-lines used so that we can replicate the issue -->
<!-- * The full output of the command that failed -->
<!-- * The output of the "minikube logs" command, if applicable -->
<!-- * Which operating system version was used -->

View File

@ -104,7 +104,7 @@ out/minikube.d: pkg/minikube/assets/assets.go
$(MAKEDEPEND) out/minikube-$(GOOS)-$(GOARCH) $(ORG) $^ $(MINIKUBEFILES) > $@
-include out/minikube.d
out/minikube-%-$(GOARCH): pkg/minikube/assets/assets.go
out/minikube-%: pkg/minikube/assets/assets.go
ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
$(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@)
else
@ -119,7 +119,7 @@ ifneq ($(GOPATH)/src/$(REPOPATH),$(CURDIR))
$(warning https://github.com/kubernetes/minikube/blob/master/docs/contributors/build_guide.md)
$(warning ******************************************************************************)
endif
GOOS=$* GOARCH=$(GOARCH) go build -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" -a -o $@ k8s.io/minikube/cmd/minikube
GOOS="$(firstword $(subst -, ,$*))" GOARCH="$(lastword $(subst -, ,$*))" go build -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS)" -a -o $@ k8s.io/minikube/cmd/minikube
endif
.PHONY: e2e-%-amd64

View File

@ -237,7 +237,7 @@ func beginCacheImages(g *errgroup.Group, kVersion string) {
if !viper.GetBool(cacheImages) {
return
}
console.OutStyle("caching", "Caching images in the background ...")
console.OutStyle("caching", "Downloading Kubernetes %s images in the background ...", kVersion)
g.Go(func() error {
return machine.CacheImagesForBootstrapper(kVersion, viper.GetString(cmdcfg.Bootstrapper))
})
@ -487,7 +487,7 @@ func waitCacheImages(g *errgroup.Group) {
if !viper.GetBool(cacheImages) {
return
}
console.OutStyle("waiting", "Waiting for image caching to complete ...")
console.OutStyle("waiting", "Waiting for image downloads to complete ...")
if err := g.Wait(); err != nil {
glog.Errorln("Error caching images: ", err)
}

View File

@ -64,12 +64,7 @@ NOTE: Confirm that all release-related PR's have been submitted before doing thi
Do this in a direct clone of the upstream kubernetes/minikube repository (not your fork!):
```shell
version=<new version number>
git fetch
git checkout master
git pull
git tag -a v$version -m "$version Release"
git push origin v$version
hack/tag_release.sh <new version number>
```
## Build the Release

View File

@ -36,7 +36,7 @@ sudo apt install libvirt-bin libvirt-daemon-system qemu-kvm
sudo yum install libvirt-daemon-kvm qemu-kvm
```
Enable,start, and verify the libvirtd service has started.
Enable, start, and verify the `libvirtd` service has started.
```shell
sudo systemctl enable libvirtd.service
@ -44,13 +44,17 @@ sudo systemctl start libvirtd.service
sudo systemctl status libvirtd.service
```
Then you will need to add yourself to libvirt group (older distributions may use libvirtd instead)
Then you will need to add yourself to the `libvirt` group (older distributions may use `libvirtd` instead)
`sudo usermod -a -G libvirt $(whoami)`
```shell
sudo usermod -a -G libvirt $(whoami)
```
Then to join the group with your current user session:
`newgrp libvirt`
```shell
newgrp libvirt
```
Now install the driver:
@ -63,9 +67,9 @@ NOTE: Ubuntu users on a release older than 18.04, or anyone experiencing [#3206:
```shell
sudo apt install libvirt-dev
test -d $HOME/go/src/k8s.io/minikube || \
git clone https://github.com/kubernetes/minikube.git $HOME/go/src/k8s.io/minikube
cd $HOME/go/src/k8s.io/minikube
test -d $GOPATH/src/k8s.io/minikube || \
git clone https://github.com/kubernetes/minikube.git $GOPATH/src/k8s.io/minikube
cd $GOPATH/src/k8s.io/minikube
git pull
make out/docker-machine-driver-kvm2
sudo install out/docker-machine-driver-kvm2 /usr/local/bin

View File

@ -118,8 +118,8 @@ FILES_TO_UPLOAD=(
'minikube-linux-amd64.sha256'
'minikube-darwin-amd64'
'minikube-darwin-amd64.sha256'
'minikube-windows-amd64'
'minikube-windows-amd64.sha256'
'minikube-windows-amd64.exe'
'minikube-windows-amd64.exe.sha256'
'minikube-installer.exe'
"minikube_${DEB_VERSION}.deb"
"minikube-${RPM_VERSION}.rpm"

39
hack/tag_release.sh Normal file
View File

@ -0,0 +1,39 @@
#!/bin/bash
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# tag_release.sh <major>.<minor>.<build>
#
# Tags a minikube release: clones a pristine copy of the upstream
# repository, creates an annotated tag v<version>, and pushes it.
set -eux -o pipefail

if [ "$#" -ne 1 ]; then
  echo "Usage: tag_release.sh <major>.<minor>.<build>" >&2
  exit 1
fi

readonly version=$1
readonly tag="v${version}"

# Validate the version up front so we never push a malformed tag.
if [[ ! "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
  echo "supplied version does not match expectations: ${version}" >&2
  exit 2
fi

# Declare and assign separately: "readonly var=$(cmd)" would mask a mktemp
# failure from "set -e", because readonly's own exit status (0) wins
# (ShellCheck SC2155).
clean_repo=$(mktemp -d)
readonly clean_repo

# Tag from a fresh clone so local, uncommitted state can never leak into
# the release tag.
git clone --depth 1 git@github.com:kubernetes/minikube.git "${clean_repo}"
cd "${clean_repo}"
git fetch
git checkout master
git pull
git tag -a "${tag}" -m "${version} Release"
git push origin "${tag}"

View File

@ -25,7 +25,7 @@
RequestExecutionLevel admin ;Require admin rights on NT6+ (When UAC is turned on)
InstallDir "$PROGRAMFILES\${COMPANYNAME}\${APPNAME}"
InstallDir "$PROGRAMFILES64\${COMPANYNAME}\${APPNAME}"
!define UNINSTALLDIR "Software\Microsoft\Windows\CurrentVersion\Uninstall\${COMPANYNAME} ${APPNAME}"
BrandingText " "

View File

@ -373,9 +373,8 @@ func NewKubeletConfig(k8s config.KubernetesConfig, r cruntime.Manager) (string,
func (k *KubeadmBootstrapper) UpdateCluster(cfg config.KubernetesConfig) error {
if cfg.ShouldLoadCachedImages {
err := machine.LoadImages(k.c, constants.GetKubeadmCachedImages(cfg.KubernetesVersion), constants.ImageCacheDir)
if err != nil {
return errors.Wrap(err, "loading cached images")
if err := machine.LoadImages(k.c, constants.GetKubeadmCachedImages(cfg.KubernetesVersion), constants.ImageCacheDir); err != nil {
console.Failure("Unable to load cached images: %v", err)
}
}
r, err := cruntime.New(cruntime.Config{Type: cfg.ContainerRuntime, Socket: cfg.CRISocket})

View File

@ -80,7 +80,7 @@ func (r *Containerd) Disable() error {
// LoadImage loads an image into this runtime
func (r *Containerd) LoadImage(path string) error {
glog.Infof("Loading image: %s", path)
return r.Runner.Run(fmt.Sprintf("sudo ctr cri load %s", path))
return r.Runner.Run(fmt.Sprintf("sudo ctr images import %s", path))
}
// KubeletOptions returns kubelet options for a containerd

View File

@ -30,7 +30,7 @@ import (
const errMsg = `
The Xhyve driver is not included in minikube yet. Please follow the directions at
https://github.com/kubernetes/minikube/blob/master/DRIVERS.md#xhyve-driver
https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#xhyve-driver
`
func init() {

View File

@ -33,7 +33,7 @@ import (
)
// rootCauseRe is a regular expression that matches known failure root causes
var rootCauseRe = regexp.MustCompile(`^error: |eviction manager: pods.* evicted|unknown flag: --`)
var rootCauseRe = regexp.MustCompile(`^error: |eviction manager: pods.* evicted|unknown flag: --|forbidden.*no providers available|eviction manager:.*evicted`)
// importantPods are a list of pods to retrieve logs for, in addition to the bootstrapper logs.
var importantPods = []string{

View File

@ -28,10 +28,11 @@ func TestIsProblem(t *testing.T) {
}{
{"almost", false, "F2350 I would love to be an unknown flag, but I am not -- :( --"},
{"apiserver-required-flag #1962", true, "error: [service-account-issuer is a required flag when BoundServiceAccountTokenVolume is enabled, --service-account-signing-key-file and --service-account-issuer are required flags"},
{"kubelet-eviction #", true, "I0213 07:16:44.041623 2410 eviction_manager.go:187] eviction manager: pods kube-apiserver-minikube_kube-system(87f41e2e0629c3deb5c2239e08d8045d) evicted, waiting for pod to be cleaned up"},
{"kubelet-eviction #3611", true, `eviction_manager.go:187] eviction manager: pods kube-proxy-kfs8p_kube-system(27fd6b4b-33cf-11e9-ae1d-00155d4b0144) evicted, waiting for pod to be cleaned up`},
{"kubelet-unknown-flag #3655", true, "F0212 14:55:46.443031 2693 server.go:148] unknown flag: --AllowedUnsafeSysctls"},
{"apiserver-auth-mode #2852", true, `{"log":"Error: unknown flag: --Authorization.Mode\n","stream":"stderr","time":"2018-06-17T22:16:35.134161966Z"}`},
{"apiserver-admission #3524", true, "error: unknown flag: --GenericServerRunOptions.AdmissionControl"},
{"no-providers-available #3818", true, ` kubelet.go:1662] Failed creating a mirror pod for "kube-apiserver-minikube_kube-system(c7d572aebd3d33b17fa78ae6395b6d0a)": pods "kube-apiserver-minikube" is forbidden: no providers available to validate pod request`},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {

View File

@ -103,9 +103,9 @@ func (l *loadBalancerEmulator) updateService(restClient rest.Interface, svc core
request := l.patchConverter.convert(restClient, patch)
result, err := l.requestSender.send(request)
if err != nil {
glog.Infof("Patched %s with IP %s", svc.Name, clusterIP)
} else {
glog.Errorf("error patching %s with IP %s: %s", svc.Name, clusterIP, err)
} else {
glog.Infof("Patched %s with IP %s", svc.Name, clusterIP)
}
return result, err
}

View File

@ -27,8 +27,8 @@ import (
)
func TestFunctional(t *testing.T) {
minikubeRunner := NewMinikubeRunner(t)
minikubeRunner.EnsureRunning()
r := NewMinikubeRunner(t)
r.EnsureRunning()
// This one is not parallel, and ensures the cluster comes up
// before we run any other tests.
t.Run("Status", testClusterStatus)
@ -41,7 +41,7 @@ func TestFunctional(t *testing.T) {
t.Run("Provisioning", testProvisioning)
t.Run("Tunnel", testTunnel)
if !usingNoneDriver(minikubeRunner) {
if !usingNoneDriver(r) {
t.Run("EnvVars", testClusterEnv)
t.Run("SSH", testClusterSSH)
t.Run("IngressController", testIngressController)
@ -50,25 +50,22 @@ func TestFunctional(t *testing.T) {
}
func TestFunctionalContainerd(t *testing.T) {
minikubeRunner := NewMinikubeRunner(t)
r := NewMinikubeRunner(t)
if usingNoneDriver(minikubeRunner) {
if usingNoneDriver(r) {
t.Skip("Can't run containerd backend with none driver")
}
if minikubeRunner.GetStatus() != state.None.String() {
minikubeRunner.RunCommand("delete", true)
if r.GetStatus() != state.None.String() {
r.RunCommand("delete", true)
}
minikubeRunner.SetRuntime("containerd")
minikubeRunner.EnsureRunning()
r.Start("--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock")
t.Run("Gvisor", testGvisor)
t.Run("GvisorRestart", testGvisorRestart)
minikubeRunner.RunCommand("delete", true)
r.RunCommand("delete", true)
}
// usingNoneDriver returns true if using the none driver
func usingNoneDriver(runner util.MinikubeRunner) bool {
return strings.Contains(runner.StartArgs, "--vm-driver=none")
func usingNoneDriver(r util.MinikubeRunner) bool {
return strings.Contains(r.StartArgs, "--vm-driver=none")
}

View File

@ -30,48 +30,47 @@ import (
func TestStartStop(t *testing.T) {
tests := []struct {
runtime string
name string
args []string
}{
{runtime: "docker"},
{runtime: "containerd"},
{runtime: "crio"},
{"docker+cache", []string{"--container-runtime=docker", "--cache-images"}},
{"containerd+cache", []string{"--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock", "--cache-images"}},
{"crio+cache", []string{"--container-runtime=crio", "--cache-images"}},
}
for _, test := range tests {
t.Run(test.runtime, func(t *testing.T) {
runner := NewMinikubeRunner(t)
if test.runtime != "docker" && usingNoneDriver(runner) {
t.Skipf("skipping, can't use %s with none driver", test.runtime)
t.Run(test.name, func(t *testing.T) {
r := NewMinikubeRunner(t)
if !strings.Contains(test.name, "docker") && usingNoneDriver(r) {
t.Skipf("skipping %s - incompatible with none driver", test.name)
}
runner.RunCommand("config set WantReportErrorPrompt false", true)
runner.RunCommand("delete", false)
runner.CheckStatus(state.None.String())
r.RunCommand("config set WantReportErrorPrompt false", true)
r.RunCommand("delete", false)
r.CheckStatus(state.None.String())
r.Start(test.args...)
r.CheckStatus(state.Running.String())
runner.SetRuntime(test.runtime)
runner.Start()
runner.CheckStatus(state.Running.String())
ip := runner.RunCommand("ip", true)
ip := r.RunCommand("ip", true)
ip = strings.TrimRight(ip, "\n")
if net.ParseIP(ip) == nil {
t.Fatalf("IP command returned an invalid address: %s", ip)
}
checkStop := func() error {
runner.RunCommand("stop", true)
return runner.CheckStatusNoFail(state.Stopped.String())
r.RunCommand("stop", true)
return r.CheckStatusNoFail(state.Stopped.String())
}
if err := util.Retry(t, checkStop, 5*time.Second, 6); err != nil {
t.Fatalf("timed out while checking stopped status: %v", err)
}
runner.Start()
runner.CheckStatus(state.Running.String())
r.Start(test.args...)
r.CheckStatus(state.Running.String())
runner.RunCommand("delete", true)
runner.CheckStatus(state.None.String())
r.RunCommand("delete", true)
r.CheckStatus(state.None.String())
})
}
}

View File

@ -184,11 +184,6 @@ func (m *MinikubeRunner) RunDaemon2(command string) (*exec.Cmd, *bufio.Reader, *
return cmd, bufio.NewReader(stdoutPipe), bufio.NewReader(stderrPipe)
}
// SetRuntime saves the runtime backend
func (m *MinikubeRunner) SetRuntime(runtime string) {
m.Runtime = runtime
}
func (m *MinikubeRunner) SSH(command string) (string, error) {
path, _ := filepath.Abs(m.BinaryPath)
cmd := exec.Command(path, "ssh", command)
@ -202,17 +197,9 @@ func (m *MinikubeRunner) SSH(command string) (string, error) {
return string(stdout), nil
}
func (m *MinikubeRunner) Start() {
opts := ""
// TODO(tstromberg): Deprecate this in favor of making it possible for tests to define explicit flags.
switch r := m.Runtime; r {
case "containerd":
opts = "--container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock"
case "crio":
opts = "--container-runtime=cri-o"
}
m.RunCommand(fmt.Sprintf("start %s %s %s --alsologtostderr --v=5", m.StartArgs, m.Args, opts), true)
func (m *MinikubeRunner) Start(opts ...string) {
cmd := fmt.Sprintf("start %s %s %s --alsologtostderr --v=2", m.StartArgs, m.Args, strings.Join(opts, " "))
m.RunCommand(cmd, true)
}
func (m *MinikubeRunner) EnsureRunning() {