Merge branch 'master' into update-localkube-docker

pull/1106/head
Aaron Prindle 2017-02-17 09:39:33 -08:00 committed by GitHub
commit 19bdd1d097
131 changed files with 2190 additions and 3597 deletions

View File

@ -40,6 +40,8 @@ Here is a rough set of steps that usually works to add a new dependency.
If it is a large dependency, please commit the vendor/ directory changes separately.
This makes review easier in Github.
NOTE: We have recently added a deprecation message regarding boot2docker. Make sure that this deprecation message ends up in the vendored code at `/vendor/github.com/docker/machine/libmachine/provision/boot2docker.go`: [https://github.com/kubernetes/minikube/blob/master/vendor/github.com/docker/machine/libmachine/provision/boot2docker.go#L220](https://github.com/kubernetes/minikube/blob/master/vendor/github.com/docker/machine/libmachine/provision/boot2docker.go#L220)
```shell
git add vendor/
git commit -m "Adding dependency foo"

2302
Godeps/Godeps.json generated

File diff suppressed because it is too large Load Diff

View File

@ -22,6 +22,9 @@ INSTALL_SIZE ?= $(shell du out/minikube-windows-amd64.exe | cut -f1)
BUILDROOT_BRANCH ?= 2016.08
REGISTRY?=gcr.io/k8s-minikube
ISO_VERSION ?= v1.0.6
ISO_BUCKET ?= minikube/iso
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
BUILD_DIR ?= ./out
@ -49,7 +52,7 @@ TAG ?= $(LOCALKUBE_VERSION)
# Set the version information for the Kubernetes servers, and build localkube statically
K8S_VERSION_LDFLAGS := $(shell $(PYTHON) hack/get_k8s_version.py 2>&1)
MINIKUBE_LDFLAGS := -X k8s.io/minikube/pkg/version.version=$(VERSION)
MINIKUBE_LDFLAGS := -X k8s.io/minikube/pkg/version.version=$(VERSION) -X k8s.io/minikube/pkg/version.isoVersion=$(ISO_VERSION) -X k8s.io/minikube/pkg/version.isoPath=$(ISO_BUCKET)
LOCALKUBE_LDFLAGS := "$(K8S_VERSION_LDFLAGS) $(MINIKUBE_LDFLAGS) -s -w -extldflags '-static'"
LOCALKUBEFILES := GOPATH=$(GOPATH) go list -f '{{join .Deps "\n"}}' ./cmd/localkube/ | grep k8s.io | GOPATH=$(GOPATH) xargs go list -f '{{ range $$file := .GoFiles }} {{$$.Dir}}/{{$$file}}{{"\n"}}{{end}}'
@ -83,13 +86,14 @@ localkube-image: out/localkube
iso:
cd deploy/iso/boot2docker && ./build.sh
minikube-iso:
minikube_iso:
if [ ! -d $(BUILD_DIR)/buildroot ]; then \
mkdir -p $(BUILD_DIR); \
git clone --branch=$(BUILDROOT_BRANCH) https://github.com/buildroot/buildroot $(BUILD_DIR)/buildroot; \
fi;
$(MAKE) BR2_EXTERNAL=../../deploy/iso/minikube-iso minikube_defconfig -C $(BUILD_DIR)/buildroot
$(MAKE) -C $(BUILD_DIR)/buildroot
mv $(BUILD_DIR)/buildroot/output/images/rootfs.iso9660 $(BUILD_DIR)/minikube.iso
test-iso:
go test -v $(REPOPATH)/test/integration --tags=iso --minikube-args="--iso-url=file://$(shell pwd)/out/buildroot/output/images/rootfs.iso9660"
@ -111,9 +115,9 @@ $(GOPATH)/bin/go-bindata: $(GOPATH)/src/$(ORG)
.PHONY: cross
cross: out/localkube out/minikube-linux-amd64 out/minikube-darwin-amd64 out/minikube-windows-amd64.exe
.PHONE: checksum
.PHONY: checksum
checksum:
for f in out/localkube out/minikube-linux-amd64 out/minikube-darwin-amd64 out/minikube-windows-amd64.exe ; do \
for f in out/localkube out/minikube-linux-amd64 out/minikube-darwin-amd64 out/minikube-windows-amd64.exe out/minikube.iso; do \
if [ -f "$${f}" ]; then \
openssl sha256 "$${f}" | awk '{print $$2}' > "$${f}.sha256" ; \
fi ; \
@ -180,3 +184,8 @@ out/localkube-image: out/localkube
@echo ""
@echo "${REGISTRY}/localkube-image:$(TAG) successfully built"
@echo "See https://github.com/kubernetes/minikube/tree/master/deploy/docker for instructions on how to run image"
.PHONY: release-iso
release-iso: minikube_iso checksum
gsutil cp out/minikube.iso gs://$(ISO_BUCKET)/minikube-$(ISO_VERSION).iso
gsutil cp out/minikube.iso.sha256 gs://$(ISO_BUCKET)/minikube-$(ISO_VERSION).iso.sha256

View File

@ -45,6 +45,8 @@ We publish CI builds of minikube, built at every Pull Request. Builds are availa
- https://storage.googleapis.com/minikube-builds/PR_NUMBER/minikube-linux-amd64
- https://storage.googleapis.com/minikube-builds/PR_NUMBER/minikube-windows-amd64.exe
We also publish CI builds of minikube-iso, built at every Pull Request that touches deploy/iso/minikube-iso. Builds are available at:
- https://storage.googleapis.com/minikube-builds/PR_NUMBER/minikube-testing.iso
## Quickstart
@ -120,7 +122,7 @@ plugins, if required.
### Reusing the Docker daemon
When using a single VM of kubernetes its really handy to reuse the Docker daemon inside the VM; as this means you don't have to build on your host machine and push the image into a docker registry - you can just build inside the same docker daemon as minikube which speeds up local experiments.
When using a single VM of kubernetes it's really handy to reuse the Docker daemon inside the VM; as this means you don't have to build on your host machine and push the image into a docker registry - you can just build inside the same docker daemon as minikube which speeds up local experiments.
To be able to work with the docker daemon on your mac/linux host use the [docker-env command](./docs/minikube_docker-env.md) in your shell:
@ -205,7 +207,7 @@ To change the `MaxPods` setting to 5 on the Kubelet, pass this flag: `--extra-co
This feature also supports nested structs. To change the `LeaderElection.LeaderElect` setting to `true` on the scheduler, pass this flag: `--extra-config=scheduler.LeaderElection.LeaderElect=true`.
To set the `AuthorizationMode` on the `apiserver` to `RBAC`, you can use: `--extra-config=apiserver.GenericServerRunOptions.AuthorizationMode=RBAC`. You should use `--extra-config=apiserver.GenericServerRunOptions.AuthorizationRBACSuperUser=minikube` as well in that case.
To set the `AuthorizationMode` on the `apiserver` to `RBAC`, you can use: `--extra-config=apiserver.GenericServerRunOptions.AuthorizationMode=RBAC`. You should use `--extra-config=apiserver.GenericServerRunOptions.AuthorizationRBAC,SuperUser=minikube` as well in that case.
To enable all alpha feature gates, you can use: `--feature-gates=AllAlpha=true`
@ -264,6 +266,7 @@ However, Minikube is configured to persist files stored under the following host
* `/data`
* `/var/lib/localkube`
* `/var/lib/docker`
* `/tmp/hostpath_pv`
Here is an example PersistentVolume config to persist data in the '/data' directory:
@ -349,8 +352,38 @@ $ minikube start --docker-env HTTP_PROXY=http://$YOURPROXY:PORT \
--docker-env HTTPS_PROXY=https://$YOURPROXY:PORT
```
## Minikube Environment Variables
Minikube supports passing environment variables instead of flags for every value listed in `minikube config list`. This is done by passing an environment variable with the prefix `MINIKUBE_`. For example the `minikube start --iso-url="$ISO_URL"` flag can also be set by setting the `MINIKUBE_ISO_URL="$ISO_URL"` environment variable.
## Known Issues
Some features can only be accessed by environment variables, here is a list of these features:
* **MINIKUBE_HOME** - (string) sets the path for the .minikube directory that minikube uses for state/configuration
* **MINIKUBE_WANTUPDATENOTIFICATION** - (bool) sets whether the user wants an update notification for new minikube versions
* **MINIKUBE_REMINDERWAITPERIODINHOURS** - (int) sets the number of hours to check for an update notification
* **MINIKUBE_WANTREPORTERROR** - (bool) sets whether the user wants to send anonymous errors reports to help improve minikube
* **MINIKUBE_WANTREPORTERRORPROMPT** - (bool) sets whether the user wants to be prompted to report errors, to help improve minikube
* **MINIKUBE_WANTKUBECTLDOWNLOADMSG** - (bool) sets whether minikube should tell a user that `kubectl` cannot be found on their path
* **MINIKUBE_ENABLE_PROFILING** - (int, `1` enables it) enables trace profiling to be generated for minikube which can be analyzed via:
```shell
# set env var and then run minikube
$ MINIKUBE_ENABLE_PROFILING=1 ./out/minikube start
2017/01/09 13:18:00 profile: cpu profiling enabled, /tmp/profile933201292/cpu.pprof
Starting local Kubernetes cluster...
Kubectl is now configured to use the cluster.
2017/01/09 13:19:06 profile: cpu profiling disabled, /tmp/profile933201292/cpu.pprof
# Then you can examine the profile with:
$ go tool pprof /tmp/profile933201292/cpu.pprof
```
## KNOWN Issues
* Features that require a Cloud Provider will not work in Minikube. These include:
* LoadBalancers
* Features that require multiple nodes. These include:

View File

@ -20,7 +20,7 @@ Here are some specific features that align with our goal:
This section contains the overall priorities of the minikube project, in rough order.
* Setting up a well-tested, secure and complete Kubernetes cluster locally.
* Mac OSX and Linux support.
* Cross Platform support (macOS, Linux, Windows)
* Supporting existing Kubernetes features:
* Load Balancer support.
* Persistent disks.
@ -28,30 +28,21 @@ This section contains the overall priorities of the minikube project, in rough o
* Development-focused features like:
* Mounting host directories.
* VPN/proxy networking.
* Windows support.
* Native hypervisor integration.
* Support for alternative Kubernetes runtimes, like rkt.
* Removing the VirtualBox dependency and replacing it with Hypervisor.framework/Hyper-V.
* Support for multiple nodes.
## Timelines
These are rough dates, on a 3-month schedule. Minikube will release much faster than this, so this section is fairly speculative.
Minikube will release much faster than this, so this section is fairly speculative.
This section is subject to change based on feedback and staffing.
### June 2016
* Fully-tested, complete release of minikube that supports:
* Mac OSX and Linux.
* Kubernetes 1.3.
* Docker 1.11.x.
* VirtualBox.
### Q1 2017
### September 2016
* Support for Windows.
* Kubernetes 1.4, Docker 1.x.y.
* Host Directory mounting.
* Improved networking (Ingress, proxies, VPN...).
### December 2016
* Native hypervisor integration (Hypervisor.framework for OSX, Hyper-V for Windows).
* Support Rkt.
* Remove hypervisor on Linux systems.
* Release Kubernetes 1.6.0 alpha and beta releases packaged with minikube
* Release Kubernetes 1.6.0 packaged with minikube within two days of GA upstream build
* Run local e2e Kubernetes tests with minikube
* Minikube no longer depends on libmachine
* Minikube no longer depends on existing KVM driver
* Native drivers are made default and packaged with minikube
* Improve minikube start time by 30%
* Add a no-vm driver for linux CI environments

View File

@ -88,13 +88,14 @@ minikube addons enable %s`, addonName, addonName))
serviceList, err := cluster.GetServiceListByLabel(namespace, key, addonName)
if err != nil {
fmt.Fprintf(os.Stderr, "Error getting service with namespace: %s and labels %s:%s: %s", namespace, key, addonName, err)
fmt.Fprintf(os.Stderr, "Error getting service with namespace: %s and labels %s:%s: %s\n", namespace, key, addonName, err)
os.Exit(1)
}
if len(serviceList.Items) == 0 {
fmt.Fprintf(os.Stdout, `
This addon does not have an endpoint defined for the 'addons open' command
You can add one by annotating a service with the label %s:%s`, key, addonName)
You can add one by annotating a service with the label %s:%s
`, key, addonName)
os.Exit(0)
}
for i := range serviceList.Items {

View File

@ -31,7 +31,7 @@ var configUnsetCmd = &cobra.Command{
Long: "unsets PROPERTY_NAME from the minikube config file. Can be overwritten by flags or environmental variables",
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {
fmt.Fprintf(os.Stdout, "usage: minikube config unset PROPERTY_NAME")
fmt.Fprintln(os.Stdout, "usage: minikube config unset PROPERTY_NAME")
os.Exit(1)
}
err := unset(args[0])

View File

@ -37,7 +37,7 @@ import (
)
var dirs = [...]string{
constants.Minipath,
constants.GetMinipath(),
constants.MakeMiniPath("certs"),
constants.MakeMiniPath("machines"),
constants.MakeMiniPath("cache"),

View File

@ -49,6 +49,8 @@ var serviceCmd = &cobra.Command{
os.Exit(1)
}
serviceURLTemplate = t
RootCmd.PersistentPreRun(cmd, args)
},
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 || len(args) > 1 {

View File

@ -104,6 +104,7 @@ func runStart(cmd *cobra.Command, args []string) {
Downloader: pkgutil.DefaultDownloader{},
}
fmt.Println("Starting VM...")
var host *host.Host
start := func() (err error) {
host, err = cluster.StartHost(api, config)
@ -131,21 +132,26 @@ func runStart(cmd *cobra.Command, args []string) {
NetworkPlugin: viper.GetString(networkPlugin),
ExtraOptions: extraOptions,
}
fmt.Println("SSH-ing files into VM...")
if err := cluster.UpdateCluster(host, host.Driver, kubernetesConfig); err != nil {
glog.Errorln("Error updating cluster: ", err)
cmdUtil.MaybeReportErrorAndExit(err)
}
fmt.Println("Setting up certs...")
if err := cluster.SetupCerts(host.Driver); err != nil {
glog.Errorln("Error configuring authentication: ", err)
cmdUtil.MaybeReportErrorAndExit(err)
}
fmt.Println("Starting cluster components...")
if err := cluster.StartCluster(host, kubernetesConfig); err != nil {
glog.Errorln("Error starting cluster: ", err)
cmdUtil.MaybeReportErrorAndExit(err)
}
fmt.Println("Connecting to cluster...")
kubeHost, err := host.Driver.GetURL()
if err != nil {
glog.Errorln("Error connecting to cluster: ", err)
@ -153,6 +159,7 @@ func runStart(cmd *cobra.Command, args []string) {
kubeHost = strings.Replace(kubeHost, "tcp://", "https://", -1)
kubeHost = strings.Replace(kubeHost, ":2376", ":"+strconv.Itoa(constants.APIServerPort), -1)
fmt.Println("Setting up kubeconfig...")
// setup kubeconfig
keepContext := viper.GetBool(keepContext)
name := constants.MinikubeContext

View File

@ -42,3 +42,11 @@ spec:
secretKeyRef:
name: registry-creds
key: aws-account
volumeMounts:
- name: gcr-creds
mountPath: "/root/.config/gcloud"
readOnly: true
volumes:
- name: gcr-creds
secret:
secretName: gcr-secret

View File

@ -40,10 +40,10 @@ Either import your private key or generate a sign-only key using `gpg2 --gen-key
```
$ git clone https://github.com/kubernetes/minikube
$ cd minikube
$ make minikube-iso
$ make minikube_iso
```
The bootable ISO image will be available in `out/buildroot/output/images/rootfs.iso9660`.
The bootable ISO image will be available in `out/minikube.iso`.
### Testing local minikube-iso changes

View File

@ -9,6 +9,8 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
@ -24,7 +26,9 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_KPROBES=y
@ -96,7 +100,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_IP_SET=y
CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
@ -185,6 +188,7 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_IP_SET=y
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_LOG_ARP=m
CONFIG_IP_NF_IPTABLES=y
@ -204,8 +208,11 @@ CONFIG_IP6_NF_MANGLE=y
CONFIG_BRIDGE=m
CONFIG_NET_SCHED=y
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
CONFIG_NET_EMATCH=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_BPF=m
CONFIG_BPF_JIT=y
CONFIG_HAMRADIO=y
CONFIG_CFG80211=y
CONFIG_MAC80211=y
@ -274,6 +281,7 @@ CONFIG_8139CP=y
CONFIG_8139TOO=y
CONFIG_FDDI=y
CONFIG_VMXNET3=y
CONFIG_HYPERV_NET=m
CONFIG_INPUT_POLLDEV=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
@ -332,6 +340,7 @@ CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_HYPERV_MOUSE=m
CONFIG_HID_TOPSEED=y
CONFIG_HID_PID=y
CONFIG_USB_HIDDEV=y
@ -348,6 +357,9 @@ CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
CONFIG_DMADEVICES=y
CONFIG_VIRTIO_PCI=y
CONFIG_HYPERV=m
CONFIG_HYPERV_UTILS=m
CONFIG_HYPERV_BALLOON=m
CONFIG_EEEPC_LAPTOP=y
CONFIG_AMD_IOMMU=y
CONFIG_INTEL_IOMMU=y
@ -373,35 +385,12 @@ CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V2=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
CONFIG_NFS_SWAP=y
CONFIG_NFS_V4_1=y
# CONFIG_NFS_V4_2 is not set
CONFIG_PNFS_FILE_LAYOUT=y
CONFIG_PNFS_BLOCK=m
CONFIG_PNFS_OBJLAYOUT=m
CONFIG_PNFS_FLEXFILE_LAYOUT=m
CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
# CONFIG_NFS_V4_1_MIGRATION is not set
# CONFIG_ROOT_NFS is not set
# CONFIG_NFS_USE_LEGACY_DNS is not set
CONFIG_NFS_USE_KERNEL_DNS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
CONFIG_NFSD_V4=y
# CONFIG_NFSD_PNFS is not set
# CONFIG_NFSD_V4_SECURITY_LABEL is not set
# CONFIG_NFSD_FAULT_INJECTION is not set
CONFIG_GRACE_PERIOD=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_9P_FS=m
CONFIG_9P_FS_POSIX_ACL=y
CONFIG_9P_FS_SECURITY=y
@ -431,25 +420,3 @@ CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_BPF=y
CONFIG_BPF_SYSCALL=y
CONFIG_NET_CLS_BPF=m
CONFIG_NET_ACT_BPF=m
CONFIG_BPF_JIT=y
CONFIG_HAVE_BPF_JIT=y
CONFIG_BPF_EVENTS=y
#
# Hyper V
#
CONFIG_HYPERV_STORAGE=m
CONFIG_HYPERV_NET=m
CONFIG_HYPERV_KEYBOARD=m
CONFIG_HID_HYPERV_MOUSE=m
#
# Microsoft Hyper-V guest support
#
CONFIG_HYPERV=m
CONFIG_HYPERV_UTILS=m
CONFIG_HYPERV_BALLOON=m

View File

@ -0,0 +1,9 @@
[Match]
Name=eth1
Virtualization=qemu
[Network]
DHCP=ipv4
[DHCP]
UseDNS=false

View File

@ -127,6 +127,9 @@ if [ -n "$BOOT2DOCKER_DATA" ]; then
mkdir -p /mnt/$PARTNAME/data
ln -s /mnt/$PARTNAME/data /data
mkdir -p /mnt/$PARTNAME/hostpath_pv
ln -s /mnt/$PARTNAME/hostpath_pv /tmp/hostpath_pv
rm -rf /var/lib/rkt
if [ ! -d /mnt/$PARTNAME/var/lib/rkt ]; then
mkdir -p /mnt/$PARTNAME/var/lib/rkt

View File

@ -2,4 +2,6 @@ sha256 7e7c122f92f1dd8e621580869903a367e6ba2dd80f3ab9bf40b089d972d0c827 rkt-v1.
sha256 dd514db743e9f8bdae9169bf416d6ed8a83e862e0621ce57a9d20a4f7bb1adbb rkt-v1.14.0.tar.gz.asc
sha256 2c00e816a5b470f29483d9660bd62e80da8f14e2a0ba79c841e15e1a28fbf975 rkt-v1.23.0.tar.gz
sha256 5aa2c2ac71f21bf3fc8a94b1bdd0b2c0f4060ad9054502b0a693f4632b093c2e rkt-v1.23.0.tar.gz.asc
sha256 0ec396f1af7782e402d789e6e34e9257033efac5db71d740f9742f3469d02298 rkt-v1.24.0.tar.gz
sha256 577a7a7e3512c0116b3642c710304d4f36a1f66c7e34ec8753dae168a29761e3 rkt-v1.24.0.tar.gz.asc
sha256 56eb40918ba8dfbfc30bfddb3d235c3485825af1e7bd9816bdc478716d40544b app-signing-pubkey.gpg

View File

@ -4,7 +4,7 @@
#
################################################################################
RKT_BIN_VERSION = 1.23.0
RKT_BIN_VERSION = 1.24.0
RKT_BIN_SITE = https://github.com/coreos/rkt/releases/download/v$(RKT_BIN_VERSION)
RKT_BIN_SOURCE = rkt-v$(RKT_BIN_VERSION).tar.gz
@ -21,7 +21,7 @@ define RKT_BIN_BUILD_CMDS
gpg2 --import $(BR2_DL_DIR)/app-signing-pubkey.gpg
gpg2 \
--trusted-key $(shell gpg2 --with-colons --keyid-format LONG -k security@coreos.com | egrep ^pub | cut -d ':' -f5) \
--trusted-key `gpg2 --with-colons --keyid-format long -k security@coreos.com | egrep ^pub | cut -d ':' -f5` \
--verify-files $(BR2_DL_DIR)/rkt-v$(RKT_BIN_VERSION).tar.gz.asc
mkdir -p $(TARGET_DIR)/var/lib/rkt

View File

@ -1,6 +1,9 @@
[
{
"version": "v1.6.0-alpha.0"
"version": "v1.6.0-alpha.1"
},
{
"version": "v1.5.3"
},
{
"version": "v1.5.2"

View File

@ -29,7 +29,7 @@ minikube start
--iso-url string Location of the minikube iso (default "https://storage.googleapis.com/minikube/iso/minikube-v1.0.6.iso")
--keep-context This will keep the existing kubectl context and will create a minikube context.
--kubernetes-version string The kubernetes version that the minikube VM will use (ex: v1.2.3)
OR a URI which contains a localkube binary (ex: https://storage.googleapis.com/minikube/k8sReleases/v1.3.0/localkube-linux-amd64) (default "v1.5.2")
OR a URI which contains a localkube binary (ex: https://storage.googleapis.com/minikube/k8sReleases/v1.3.0/localkube-linux-amd64) (default "v1.5.3")
--kvm-network string The KVM network name. (only supported with KVM driver) (default "default")
--memory int Amount of RAM allocated to the minikube VM (default 2048)
--network-plugin string The name of the network plugin

View File

@ -17,7 +17,7 @@
set -e
ISO="out/buildroot/output/images/rootfs.iso9660"
make minikube-iso
make minikube_iso
openssl sha256 ${ISO} | awk '{print $2}' > "${ISO}.sha256"
gsutil cp "${ISO}" "${DEST}"
gsutil cp "${ISO}.sha256" "${DEST}.sha256"

View File

@ -39,6 +39,9 @@ MINIKUBE_WANTREPORTERRORPROMPT=False \
rm -rf $HOME/.minikube || true
# See the default image
./out/minikube-${OS_ARCH} start -h | grep iso
# Allow this to fail, we'll switch on the return code below.
set +e
out/e2e-${OS_ARCH} -minikube-args="--vm-driver=${VM_DRIVER} --cpus=4 --v=100 ${EXTRA_BUILD_ARGS}" -test.v -test.timeout=30m -binary=out/minikube-${OS_ARCH}

View File

@ -1,77 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script creates the localkube binary for a specified kubernetes Github tag (ex: v1.4.0)
# The script expects the following env variables:
# K8S_VERSION: The version of kubernetes to build localkube with
# COMMIT: The commit to build minikube with
set -e
export GOPATH=$PWD
cd $GOPATH/src/k8s.io/minikube
git checkout origin/$COMMIT
echo "======= Restoring Minikube Deps ======="
godep restore ./...
cd $GOPATH/src/k8s.io/kubernetes
git fetch --tags
echo "======= Checking out Kubernetes ${K8S_VERSION} ======="
git checkout ${K8S_VERSION}
godep restore ./...
echo "======= Saving Kubernetes ${K8S_VERSION} Dependency======="
cd $GOPATH/src/k8s.io/minikube
rm -rf Godeps/ vendor/
godep save ./...
# Test and make for all platforms
make test cross
# Build the e2e test target for Darwin and Linux. We don't run tests on Windows yet.
# We build these on Linux, but run the tests on different platforms.
# This makes it easier to provision slaves, since they don't need to have a go toolchain.'
GOPATH=$(pwd)/_gopath GOOS=darwin GOARCH=amd64 go test -c k8s.io/minikube/test/integration --tags=integration -o out/e2e-darwin-amd64
GOPATH=$(pwd)/_gopath GOOS=linux GOARCH=amd64 go test -c k8s.io/minikube/test/integration --tags=integration -o out/e2e-linux-amd64
cp -r test/integration/testdata out/
# Upload to localkube builds
gsutil cp out/localkube gs://minikube/k8sReleases/${K8S_VERSION}+testing/localkube-linux-amd64
# Upload the SHA
openssl sha256 out/localkube | awk '{print $2}' > out/localkube.sha256
gsutil cp out/localkube.sha256 gs://minikube/k8sReleases/${K8S_VERSION}+testing/localkube-linux-amd64.sha256
# Upload the version of minikube that we used
gsutil cp -r out/* gs://minikubetest/localkubetest/${COMMIT}
make gendocs
git config user.name "minikube-bot"
git config user.email "minikube-bot@google.com"
git checkout -b "jenkins-${K8S_VERSION}"
git status
git add -A
git commit -m "Upgrade to k8s version ${K8S_VERSION}"

View File

@ -25,6 +25,17 @@ set -e
gsutil cp gs://minikube-builds/logs/index.html gs://minikube-builds/logs/${ghprbPullId}/index.html
# If there are ISO changes, build and upload the ISO
# then set the default to the newly built ISO for testing
if out="$(git diff origin/master --name-only | grep deploy/iso/minikube)" &> /dev/null; then
echo "ISO changes detected ... rebuilding ISO"
export ISO_BUCKET="minikube-builds/${ghprbPullId}"
export ISO_VERSION="testing"
make release-iso
fi
# Build all platforms (Windows, Linux, OSX)
make cross
@ -36,5 +47,8 @@ GOPATH=$(pwd)/_gopath GOOS=linux GOARCH=amd64 go test -c k8s.io/minikube/test/in
GOPATH=$(pwd)/_gopath GOOS=windows GOARCH=amd64 go test -c k8s.io/minikube/test/integration --tags=integration -o out/e2e-windows-amd64.exe
cp -r test/integration/testdata out/
# Don't upload the buildroot artifacts if they exist
rm -r out/buildroot || true
# Upload everything we built to Cloud Storage.
gsutil -m cp -r out/* gs://minikube-builds/${ghprbPullId}/

View File

@ -1,25 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs the integration tests on an OSX machine for the xhyve Driver
# The script expects the following env variables:
# K8S_VERSION: GIT_COMMIT from upstream build.
# BUCKET: The GCP bucket the build files should be uploaded to.
gsutil mv gs://${BUCKET}/k8sReleases/${K8S_VERSION}+testing/localkube-linux-amd64 gs://${BUCKET}/k8sReleases/${K8S_VERSION}/localkube-linux-amd64
gsutil mv gs://${BUCKET}/k8sReleases/${K8S_VERSION}+testing/localkube-linux-amd64.sha256 gs://${BUCKET}/k8sReleases/${K8S_VERSION}/localkube-linux-amd64.sha256

View File

@ -238,7 +238,7 @@ func localkubeURIWasSpecified(config KubernetesConfig) bool {
// SetupCerts gets the generated credentials required to talk to the APIServer.
func SetupCerts(d drivers.Driver) error {
localPath := constants.Minipath
localPath := constants.GetMinipath()
ipStr, err := d.GetIP()
if err != nil {
return errors.Wrap(err, "Error getting ip from driver")
@ -287,7 +287,7 @@ func engineOptions(config MachineConfig) *engine.Options {
}
func createVirtualboxHost(config MachineConfig) drivers.Driver {
d := virtualbox.NewDriver(constants.MachineName, constants.Minipath)
d := virtualbox.NewDriver(constants.MachineName, constants.GetMinipath())
d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO)
d.Memory = config.Memory
d.CPU = config.CPUs
@ -328,8 +328,8 @@ func createHost(api libmachine.API, config MachineConfig) (*host.Host, error) {
return nil, errors.Wrap(err, "Error creating new host")
}
h.HostOptions.AuthOptions.CertDir = constants.Minipath
h.HostOptions.AuthOptions.StorePath = constants.Minipath
h.HostOptions.AuthOptions.CertDir = constants.GetMinipath()
h.HostOptions.AuthOptions.StorePath = constants.GetMinipath()
h.HostOptions.EngineOptions = engineOptions(config)
if err := api.Create(h); err != nil {

View File

@ -23,7 +23,7 @@ import (
)
func createVMwareFusionHost(config MachineConfig) drivers.Driver {
d := vmwarefusion.NewDriver(constants.MachineName, constants.Minipath).(*vmwarefusion.Driver)
d := vmwarefusion.NewDriver(constants.MachineName, constants.GetMinipath()).(*vmwarefusion.Driver)
d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO)
d.Memory = config.Memory
d.CPU = config.CPUs
@ -56,7 +56,7 @@ func createXhyveHost(config MachineConfig) *xhyveDriver {
return &xhyveDriver{
BaseDriver: &drivers.BaseDriver{
MachineName: constants.MachineName,
StorePath: constants.Minipath,
StorePath: constants.GetMinipath(),
},
Memory: config.Memory,
CPU: config.CPUs,

View File

@ -29,7 +29,7 @@ func createKVMHost(config MachineConfig) *kvm.Driver {
return &kvm.Driver{
BaseDriver: &drivers.BaseDriver{
MachineName: constants.MachineName,
StorePath: constants.Minipath,
StorePath: constants.GetMinipath(),
},
Memory: config.Memory,
CPU: config.CPUs,
@ -37,8 +37,8 @@ func createKVMHost(config MachineConfig) *kvm.Driver {
PrivateNetwork: "docker-machines",
Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO),
DiskSize: config.DiskSize,
DiskPath: filepath.Join(constants.Minipath, "machines", constants.MachineName, fmt.Sprintf("%s.img", constants.MachineName)),
ISO: filepath.Join(constants.Minipath, "machines", constants.MachineName, "boot2docker.iso"),
DiskPath: filepath.Join(constants.GetMinipath(), "machines", constants.MachineName, fmt.Sprintf("%s.img", constants.MachineName)),
ISO: filepath.Join(constants.GetMinipath(), "machines", constants.MachineName, "boot2docker.iso"),
CacheMode: "default",
IOMode: "threads",
}

View File

@ -23,7 +23,7 @@ import (
)
func createHypervHost(config MachineConfig) drivers.Driver {
d := hyperv.NewDriver(constants.MachineName, constants.Minipath)
d := hyperv.NewDriver(constants.MachineName, constants.GetMinipath())
d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO)
d.VSwitch = config.HypervVirtualSwitch
d.MemSize = config.Memory

View File

@ -53,7 +53,7 @@ type localkubeCacher struct {
}
func (l *localkubeCacher) getLocalkubeCacheFilepath() string {
return filepath.Join(constants.Minipath, "cache", "localkube",
return filepath.Join(constants.GetMinipath(), "cache", "localkube",
filepath.Base(url.QueryEscape("localkube-"+l.k8sConf.KubernetesVersion)))
}

View File

@ -17,11 +17,14 @@ limitations under the License.
package constants
import (
"fmt"
"os"
"path/filepath"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/util/homedir"
"k8s.io/kubernetes/pkg/version"
minikubeVersion "k8s.io/minikube/pkg/version"
)
// MachineName is the name to use for the VM.
@ -30,8 +33,20 @@ const MachineName = "minikube"
// APIServerPort is the port that the API server should listen on.
const APIServerPort = 8443
const MinikubeHome = "MINIKUBE_HOME"
// Minipath is the path to the user's minikube dir
var Minipath = filepath.Join(homedir.HomeDir(), ".minikube")
func GetMinipath() string {
if os.Getenv(MinikubeHome) == "" {
return DefaultMinipath
}
if filepath.Base(os.Getenv(MinikubeHome)) == ".minikube" {
return os.Getenv(MinikubeHome)
}
return filepath.Join(os.Getenv(MinikubeHome), ".minikube")
}
var DefaultMinipath = filepath.Join(homedir.HomeDir(), ".minikube")
// KubeconfigPath is the path to the Kubernetes client config
var KubeconfigPath = clientcmd.RecommendedHomeFile
@ -47,7 +62,7 @@ const MinikubeEnvPrefix = "MINIKUBE"
// MakeMiniPath is a utility to calculate a relative path to our directory.
func MakeMiniPath(fileName ...string) string {
args := []string{Minipath}
args := []string{GetMinipath()}
args = append(args, fileName...)
return filepath.Join(args...)
}
@ -60,9 +75,7 @@ var LogFlags = [...]string{
const (
DefaultKeepContext = false
DefaultIsoUrl = "https://storage.googleapis.com/minikube/iso/minikube-v1.0.6.iso"
ShaSuffix = ".sha256"
DefaultIsoShaUrl = DefaultIsoUrl + ShaSuffix
DefaultMemory = 2048
DefaultCPUS = 2
DefaultDiskSize = "20g"
@ -76,6 +89,9 @@ const (
KubernetesVersionGCSURL = "https://storage.googleapis.com/minikube/k8s_releases.json"
)
var DefaultIsoUrl = fmt.Sprintf("https://storage.googleapis.com/%s/minikube-%s.iso", minikubeVersion.GetIsoPath(), minikubeVersion.GetIsoVersion())
var DefaultIsoShaUrl = DefaultIsoUrl + ShaSuffix
var DefaultKubernetesVersion = version.Get().GitVersion
var ConfigFilePath = MakeMiniPath("config")

View File

@ -43,7 +43,7 @@ import (
"github.com/pkg/errors"
)
type driverGetter func(string, []byte) (drivers.Driver, error)
type driverGetter func([]byte) (drivers.Driver, error)
type ClientType int
type clientFactory interface {
@ -79,7 +79,7 @@ const (
// Gets a new client depending on the clientType specified
// defaults to the libmachine client
func NewAPIClient(clientType ClientType) (libmachine.API, error) {
storePath := constants.Minipath
storePath := constants.GetMinipath()
certsDir := constants.MakeMiniPath("certs")
newClientFactory, ok := clientFactories[clientType]
if !ok {
@ -94,7 +94,7 @@ func getDriver(driverName string, rawDriver []byte) (drivers.Driver, error) {
if !ok {
return nil, fmt.Errorf("Unknown driver %s for platform.", driverName)
}
driver, err := driverGetter(driverName, rawDriver)
driver, err := driverGetter(rawDriver)
if err != nil {
return nil, errors.Wrapf(err, "Error getting driver for %s", driverName)
}
@ -102,7 +102,7 @@ func getDriver(driverName string, rawDriver []byte) (drivers.Driver, error) {
return driver, nil
}
func getVirtualboxDriver(_ string, rawDriver []byte) (drivers.Driver, error) {
func getVirtualboxDriver(rawDriver []byte) (drivers.Driver, error) {
var driver drivers.Driver
driver = virtualbox.NewDriver("", "")
err := json.Unmarshal(rawDriver, driver)

View File

@ -33,7 +33,7 @@ var driverMap = map[string]driverGetter{
"virtualbox": getVirtualboxDriver,
}
func getVMWareFusionDriver(driverName string, rawDriver []byte) (drivers.Driver, error) {
func getVMWareFusionDriver(rawDriver []byte) (drivers.Driver, error) {
var driver drivers.Driver
driver = &vmwarefusion.Driver{}
if err := json.Unmarshal(rawDriver, &driver); err != nil {
@ -43,7 +43,7 @@ func getVMWareFusionDriver(driverName string, rawDriver []byte) (drivers.Driver,
}
// Xhyve driver not implemented yet for non-RPC access
func getXhyveDriver(driverName string, rawDriver []byte) (drivers.Driver, error) {
func getXhyveDriver(rawDriver []byte) (drivers.Driver, error) {
return nil, errors.New(`
The Xhyve driver is not included in minikube yet. Please follow the directions at
https://github.com/kubernetes/minikube/blob/master/DRIVERS.md#xhyve-driver

View File

@ -32,7 +32,7 @@ var driverMap = map[string]driverGetter{
"virtualbox": getVirtualboxDriver,
}
func getKVMDriver(driverName string, rawDriver []byte) (drivers.Driver, error) {
func getKVMDriver(rawDriver []byte) (drivers.Driver, error) {
var driver drivers.Driver
driver = &kvm.Driver{}
if err := json.Unmarshal(rawDriver, &driver); err != nil {

View File

@ -22,6 +22,7 @@ import (
"log"
"net"
"os"
"path/filepath"
"testing"
"github.com/docker/machine/libmachine/drivers/plugin/localbinary"
@ -33,8 +34,9 @@ func makeTempDir() string {
if err != nil {
log.Fatal(err)
}
constants.Minipath = tempDir
return tempDir
tempDir = filepath.Join(tempDir, ".minikube")
os.Setenv(constants.MinikubeHome, tempDir)
return constants.GetMinipath()
}
func TestRunNotDriver(t *testing.T) {

View File

@ -32,7 +32,7 @@ var driverMap = map[string]driverGetter{
"virtualbox": getVirtualboxDriver,
}
func getHyperVDriver(driverName string, rawDriver []byte) (drivers.Driver, error) {
func getHyperVDriver(rawDriver []byte) (drivers.Driver, error) {
var driver drivers.Driver
driver = &hyperv.Driver{}
if err := json.Unmarshal(rawDriver, &driver); err != nil {

View File

@ -30,6 +30,7 @@ func MakeTempDir() string {
if err != nil {
log.Fatal(err)
}
tempDir = filepath.Join(tempDir, ".minikube")
err = os.MkdirAll(filepath.Join(tempDir, "addons"), 0777)
if err != nil {
log.Fatal(err)
@ -42,6 +43,6 @@ func MakeTempDir() string {
if err != nil {
log.Fatal(err)
}
constants.Minipath = tempDir
return tempDir
os.Setenv(constants.MinikubeHome, tempDir)
return constants.GetMinipath()
}

View File

@ -46,7 +46,7 @@ func (f DefaultDownloader) GetISOFileURI(isoURL string) string {
if urlObj.Scheme == fileScheme {
return isoURL
}
isoPath := filepath.Join(constants.Minipath, "cache", "iso", filepath.Base(isoURL))
isoPath := filepath.Join(constants.GetMinipath(), "cache", "iso", filepath.Base(isoURL))
// As this is a file URL there should be no backslashes regardless of platform running on.
return "file://" + filepath.ToSlash(isoPath)
}
@ -97,7 +97,7 @@ func (f DefaultDownloader) shouldCacheMinikubeISO(isoURL string) bool {
}
// getISOCacheFilepath returns the local cache location for the given ISO
// URL: <minikube home>/cache/iso/<basename of isoURL>.
func (f DefaultDownloader) getISOCacheFilepath(isoURL string) string {
	// Use GetMinipath() (not the removed Minipath var) so MINIKUBE_HOME
	// is honored.
	return filepath.Join(constants.GetMinipath(), "cache", "iso", filepath.Base(isoURL))
}
func (f DefaultDownloader) isMinikubeISOCached(isoURL string) bool {

View File

@ -28,10 +28,22 @@ const VersionPrefix = "v"
// version is the minikube version; overridden at build time via
// -ldflags "-X k8s.io/minikube/pkg/version.version=...".
var version = "v0.0.0-unset"
// isoVersion is the minikube ISO version; overridden at build time via
// -ldflags "-X k8s.io/minikube/pkg/version.isoVersion=...".
var isoVersion = "v0.0.0-unset"
// isoPath is the GCS bucket path the ISO is served from; overridden at
// build time via -ldflags "-X k8s.io/minikube/pkg/version.isoPath=...".
var isoPath = "minikube/iso"
// GetVersion returns the minikube version string (set at build time via ldflags).
func GetVersion() string {
	return version
}
// GetIsoVersion returns the minikube ISO version string (set at build time via ldflags).
func GetIsoVersion() string {
	return isoVersion
}
// GetIsoPath returns the GCS bucket path for the ISO (set at build time via ldflags).
func GetIsoPath() string {
	return isoPath
}
// GetSemverVersion parses the minikube version, minus its "v" prefix, into
// a semver.Version.
func GetSemverVersion() (semver.Version, error) {
	trimmed := strings.TrimPrefix(GetVersion(), VersionPrefix)
	return semver.Make(trimmed)
}

View File

@ -26,6 +26,8 @@ import (
"testing"
"time"
"github.com/pkg/errors"
"k8s.io/kubernetes/pkg/api"
commonutil "k8s.io/minikube/pkg/util"
@ -45,7 +47,7 @@ func testAddons(t *testing.T) {
checkAddon := func() error {
pods := api.PodList{}
if err := kubectlRunner.RunCommandParseOutput(addonManagerCmd, &pods); err != nil {
return err
return &commonutil.RetriableError{Err: errors.Wrap(err, "Error parsing kubectl output")}
}
for _, p := range pods.Items {

View File

@ -38,6 +38,7 @@ func TestISO(t *testing.T) {
t.Run("permissions", testMountPermissions)
t.Run("packages", testPackages)
t.Run("persistence", testPersistence)
}
func testMountPermissions(t *testing.T) {
@ -89,3 +90,28 @@ func testPackages(t *testing.T) {
}
}
// testPersistence verifies that directories expected to survive VM restarts
// are backed by the persistent disk (/dev/sda1) rather than tmpfs, by
// asking `df` over SSH which device each path is mounted on.
func testPersistence(t *testing.T) {
	minikubeRunner := util.MinikubeRunner{
		Args:       *args,
		BinaryPath: *binaryPath,
		T:          t}
	for _, dir := range []string{
		"/data",
		"/var/lib/docker",
		"/var/lib/cni",
		"/var/lib/kubelet",
		"/var/lib/localkube",
		"/var/lib/rkt",
		"/var/lib/boot2docker",
	} {
		output, err := minikubeRunner.SSH(fmt.Sprintf("df %s | tail -n 1 | awk '{print $1}'", dir))
		if err != nil {
			t.Errorf("Error checking device for %s. Error: %s.", dir, err)
			// Without SSH output there is nothing meaningful left to
			// assert for this directory; avoid a spurious second failure.
			continue
		}
		if !strings.Contains(output, "/dev/sda1") {
			t.Errorf("Path %s is not mounted persistently. %s", dir, output)
		}
	}
}

View File

@ -24,6 +24,7 @@ import (
"testing"
"time"
commonutil "k8s.io/minikube/pkg/util"
"k8s.io/minikube/test/integration/util"
)
@ -45,12 +46,14 @@ func TestStartStop(t *testing.T) {
t.Fatalf("IP command returned an invalid address: %s", ip)
}
// TODO:r2d4 The KVM driver can't handle
// starting and stopping immediately
time.Sleep(30 * time.Second)
checkStop := func() error {
runner.RunCommand("stop", true)
runner.CheckStatus("Stopped")
return runner.CheckStatusNoFail("Stopped")
}
if err := commonutil.RetryAfter(6, checkStop, 5*time.Second); err != nil {
t.Fatalf("timed out while checking stopped status: %s", err)
}
runner.Start()
runner.CheckStatus("Running")

View File

@ -111,10 +111,17 @@ func (m *MinikubeRunner) GetStatus() string {
}
// CheckStatus fails the test immediately when the machine is not in the
// desired state.
func (m *MinikubeRunner) CheckStatus(desired string) {
	err := m.CheckStatusNoFail(desired)
	if err == nil {
		return
	}
	m.T.Fatalf("%v", err)
}
// CheckStatusNoFail returns an error when the machine is not in the desired
// state, letting callers retry instead of failing the test immediately
// (use CheckStatus for the fail-fast variant).
func (m *MinikubeRunner) CheckStatusNoFail(desired string) error {
	s := m.GetStatus()
	if s != desired {
		// Return the mismatch as an error; Fatalf here would defeat the
		// purpose of the NoFail variant.
		return fmt.Errorf("Machine is in the wrong state: %s, expected %s", s, desired)
	}
	return nil
}
type KubectlRunner struct {

15
vendor/cloud.google.com/go/AUTHORS generated vendored
View File

@ -1,15 +0,0 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
Filippo Valsorda <hi@filippo.io>
Google Inc.
Ingo Oeser <nightlyone@googlemail.com>
Palm Stone Games, Inc.
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>

View File

@ -1,34 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
# Keep the list alphabetically sorted.
Andreas Litt <andreas.litt@gmail.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Sansome <me@davidsansome.com>
David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Sarah Adams <shadams@google.com>
Toby Burress <kurin@google.com>
Tuo Shan <shantuo@google.com>
Tyler Treat <ttreat31@gmail.com>

View File

@ -302,7 +302,7 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
var newToken Token
err = autorest.Respond(resp,
autorest.WithErrorUnlessOK(),
autorest.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&newToken),
autorest.ByClosing())
if err != nil {

View File

@ -28,6 +28,9 @@ type DetailedError struct {
// Message is the error message.
Message string
// Service Error is the response body of failed API in bytes
ServiceError []byte
}
// NewError creates a new Error conforming object from the passed packageType, method, and

View File

@ -165,17 +165,24 @@ func ByUnmarshallingXML(v interface{}) RespondDecorator {
}
// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
// StatusCode is among the set passed. Since these are artificial errors, the response body
// may still require closing.
// StatusCode is among the set passed. On error, response body is fully read into a buffer and
// presented in the returned error, as well as in the response body.
func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
return func(r Responder) Responder {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil && !ResponseHasStatusCode(resp, codes...) {
err = NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
resp.Request.Method,
resp.Request.URL,
resp.Status)
if resp.Body != nil {
defer resp.Body.Close()
b, _ := ioutil.ReadAll(resp.Body)
derr.ServiceError = b
resp.Body = ioutil.NopCloser(bytes.NewReader(b))
}
err = derr
}
return err
})

View File

@ -73,7 +73,7 @@ func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*ht
func AfterDelay(d time.Duration) SendDecorator {
return func(s Sender) Sender {
return SenderFunc(func(r *http.Request) (*http.Response, error) {
if !DelayForBackoff(d, 1, r.Cancel) {
if !DelayForBackoff(d, 0, r.Cancel) {
return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
}
return s.Do(r)
@ -257,6 +257,8 @@ func WithLogging(logger *log.Logger) SendDecorator {
// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set
// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early,
// returns false.
// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
// count.
func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
select {
case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second):

View File

@ -636,18 +636,13 @@ func (d *Driver) getIPByMacFromSettings(mac string) (string, error) {
log.Warnf("Failed to decode dnsmasq lease status: %s", err)
return "", err
}
ipAddr := ""
for _, value := range s {
if strings.ToLower(value.Mac_address) == strings.ToLower(mac) {
// If there are multiple entries,
// the last one is the most current
ipAddr = value.Ip_address
log.Debugf("IP address: %s", value.Ip_address)
return value.Ip_address, nil
}
}
if ipAddr != "" {
log.Debugf("IP Address: %s", ipAddr)
}
return ipAddr, nil
return "", nil
}
func (d *Driver) GetIP() (string, error) {

View File

@ -1,128 +0,0 @@
Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Vinson <avinson.public@gmail.com>
Adam Enger <adamenger@gmail.com>
Adrian Mouat <adrian.mouat@gmail.com>
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
Alex Chan <alex.chan@metaswitch.com>
Alex Elman <aelman@indeed.com>
amitshukla <ashukla73@hotmail.com>
Amy Lindburg <amy.lindburg@docker.com>
Andrew Meredith <andymeredith@gmail.com>
Andrew T Nguyen <andrew.nguyen@docker.com>
Andrey Kostov <kostov.andrey@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Mercado <amercado@thinknode.com>
Antonio Murdaca <runcom@redhat.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Baars <arthur@semmle.com>
Asuka Suzuki <hello@tanksuzuki.com>
Avi Miller <avi.miller@oracle.com>
Ayose Cazorla <ayosec@gmail.com>
BadZen <dave.trombley@gmail.com>
Ben Firshman <ben@firshman.co.uk>
bin liu <liubin0329@gmail.com>
Brian Bland <brian.bland@docker.com>
burnettk <burnettk@gmail.com>
Carson A <ca@carsonoid.net>
Chris Dillon <squarism@gmail.com>
Daisuke Fujita <dtanshi45@gmail.com>
Darren Shepherd <darren@rancher.com>
Dave Trombley <dave.trombley@gmail.com>
Dave Tucker <dt@docker.com>
David Lawrence <david.lawrence@docker.com>
David Verhasselt <david@crowdway.com>
David Xia <dxia@spotify.com>
davidli <wenquan.li@hp.com>
Dejan Golja <dejan@golja.org>
Derek McGowan <derek@mcgstyle.net>
Diogo Mónica <diogo.monica@gmail.com>
DJ Enriquez <dj.enriquez@infospace.com>
Donald Huang <don.hcd@gmail.com>
Doug Davis <dug@us.ibm.com>
Eric Yang <windfarer@gmail.com>
farmerworking <farmerworking@gmail.com>
Felix Yan <felixonmars@archlinux.org>
Florentin Raud <florentin.raud@gmail.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
gabriell nascimento <gabriell@bluesoft.com.br>
harche <p.harshal@gmail.com>
Henri Gomez <henri.gomez@gmail.com>
Hu Keping <hukeping@huawei.com>
Hua Wang <wanghua.humble@gmail.com>
HuKeping <hukeping@huawei.com>
Ian Babrou <ibobrik@gmail.com>
igayoso <igayoso@gmail.com>
Jack Griffin <jackpg14@gmail.com>
Jason Freidman <jason.freidman@gmail.com>
Jeff Nickoloff <jeff@allingeek.com>
Jessie Frazelle <jessie@docker.com>
Jianqing Wang <tsing@jianqing.org>
John Starks <jostarks@microsoft.com>
Jon Poler <jonathan.poler@apcera.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jordan Liggitt <jliggitt@redhat.com>
Josh Hawn <josh.hawn@docker.com>
Julien Fernandez <julien.fernandez@gmail.com>
Keerthan Mala <kmala@engineyard.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Kenneth Lim <kennethlimcp@gmail.com>
Kenny Leung <kleung@google.com>
Li Yi <denverdino@gmail.com>
Liu Hua <sdu.liu@huawei.com>
liuchang0812 <liuchang0812@gmail.com>
Louis Kottmann <louis.kottmann@gmail.com>
Luke Carpenter <x@rubynerd.net>
Mary Anthony <mary@docker.com>
Matt Bentley <mbentley@mbentley.net>
Matt Duch <matt@learnmetrics.com>
Matt Moore <mattmoor@google.com>
Matt Robenolt <matt@ydekproductions.com>
Michael Prokop <mika@grml.org>
Michal Minar <miminar@redhat.com>
Miquel Sabaté <msabate@suse.com>
Morgan Bauer <mbauer@us.ibm.com>
moxiegirl <mary@docker.com>
Nathan Sullivan <nathan@nightsys.net>
nevermosby <robolwq@qq.com>
Nghia Tran <tcnghia@gmail.com>
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
Oilbeater <liumengxinfly@gmail.com>
Olivier Gambier <olivier@docker.com>
Olivier Jacques <olivier.jacques@hp.com>
Omer Cohen <git@omer.io>
Patrick Devine <patrick.devine@docker.com>
Philip Misiowiec <philip@atlashealth.com>
Richard Scothern <richard.scothern@docker.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Rusty Conover <rusty@luckydinosaur.com>
Sean Boran <Boran@users.noreply.github.com>
Sebastiaan van Stijn <github@gone.nl>
Sharif Nassar <sharif@mrwacky.com>
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
Shreyas Karnik <karnik.shreyas@gmail.com>
Simon Thulbourn <simon+github@thulbourn.com>
Spencer Rinehart <anubis@overthemonkey.com>
Stefan Weil <sw@weilnetz.de>
Stephen J Day <stephen.day@docker.com>
Sungho Moon <sungho.moon@navercorp.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
Ted Reed <ted.reed@gmail.com>
tgic <farmer1992@gmail.com>
Thomas Sjögren <konstruktoid@users.noreply.github.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tonis Tiigi <tonistiigi@gmail.com>
Trevor Pounds <trevor.pounds@gmail.com>
Troels Thomsen <troels@thomsen.io>
Vincent Batts <vbatts@redhat.com>
Vincent Demeester <vincent@sbr.pm>
Vincent Giersch <vincent.giersch@ovh.net>
W. Trevor King <wking@tremily.us>
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
xg.song <xg.song@venusource.com>
xiekeyang <xiekeyang@huawei.com>
Yann ROBERT <yann.robert@anantaplex.fr>
yuzou <zouyu7@huawei.com>
姜继忠 <jizhong.jiangjz@alibaba-inc.com>

1460
vendor/github.com/docker/docker/AUTHORS generated vendored

File diff suppressed because it is too large Load Diff

View File

@ -1 +0,0 @@
../CONTRIBUTING.md

View File

@ -216,6 +216,9 @@ You also might want to clear any VirtualBox host only interfaces you are not usi
}
func (provisioner *Boot2DockerProvisioner) Provision(swarmOptions swarm.Options, authOptions auth.Options, engineOptions engine.Options) error {
fmt.Println(`==========
WARNING: The boot2docker iso is deprecated and will not be supported by future versions of minikube")
==========`)
var (
err error
)

View File

@ -1,15 +0,0 @@
Anton Povarov <anton.povarov@gmail.com>
Clayton Coleman <ccoleman@redhat.com>
Denis Smirnov <denis.smirnov.91@gmail.com>
DongYun Kang <ceram1000@gmail.com>
Dwayne Schultz <dschultz@pivotal.io>
Georg Apitz <gapitz@pivotal.io>
Gustav Paul <gustav.paul@gmail.com>
John Tuley <john@tuley.org>
Laurent <laurent@adyoulike.com>
Patrick Lee <patrick@dropbox.com>
Stephen J Day <stephen.day@docker.com>
Tamir Duberstein <tamird@gmail.com>
Todd Eisenberger <teisenberger@dropbox.com>
Tormod Erevik Lea <tormodlea@gmail.com>
Walter Schulze <awalterschulze@gmail.com>

View File

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View File

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

View File

@ -1,9 +0,0 @@
# This is the official list of cAdvisor authors for copyright purposes.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Google Inc.

View File

@ -1,23 +0,0 @@
# This is the official list of benchmark authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
#
# Please keep the list sorted.
Comodo CA Limited
Ed Maste <emaste@freebsd.org>
Google Inc.
Jeff Trawick <trawick@gmail.com>
Katriel Cohn-Gordon <katriel.cohn-gordon@cybersecurity.ox.ac.uk>
Mark Schloesser <ms@mwcollect.org>
NORDUnet A/S
Nicholas Galbreath <nickg@client9.com>
Oliver Weidner <Oliver.Weidner@gmail.com>
Ruslan Kovalov <ruslan.kovalyov@gmail.com>
Venafi, Inc.
Vladimir Rutsky <vladimir@rutsky.org>
Ximin Luo <infinity0@gmx.com>

View File

@ -1,47 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
#
# Names should be added to this file as:
# Name <email address>
#
# Please keep the list sorted.
Adam Eijdenberg <eijdenberg@google.com> <adam.eijdenberg@gmail.com>
Al Cutter <al@google.com>
Ben Laurie <benl@google.com> <ben@links.org>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Deyan Bektchiev <deyan.bektchiev@venafi.com> <deyan@bektchiev.net>
Ed Maste <emaste@freebsd.org>
Emilia Kasper <ekasper@google.com>
Eran Messeri <eranm@google.com> <eran.mes@gmail.com>
Jeff Trawick <trawick@gmail.com>
Katriel Cohn-Gordon <katriel.cohn-gordon@cybersecurity.ox.ac.uk>
Konrad Kraszewski <kraszewski@google.com> <laiquendir@gmail.com>
Linus Nordberg <linus@nordu.net>
Mark Schloesser <ms@mwcollect.org>
Nicholas Galbreath <nickg@client9.com>
Oliver Weidner <Oliver.Weidner@gmail.com>
Pascal Leroy <phl@google.com>
Paul Hadfield <hadfieldp@google.com> <paul@phad.org.uk>
Paul Lietar <lietar@google.com>
Pierre Phaneuf <pphaneuf@google.com>
Rob Stradling <rob@comodo.com>
Ruslan Kovalov <ruslan.kovalyov@gmail.com>
Vladimir Rutsky <vladimir@rutsky.org>
Ximin Luo <infinity0@gmx.com>

View File

@ -1,8 +0,0 @@
lpabon@redhat.com
campbellalex11@gmail.com
sid@sidcarter.com
hchiramm@redhat.com
mliyazud@redhat.com
nerawat@redhat.com
obnox@redhat.com
obnox@samba.org

View File

@ -1,18 +0,0 @@
The Prometheus project was started by Matt T. Proud (emeritus) and
Julius Volz in 2012.
Maintainers of this repository:
* Björn Rabenstein <beorn@soundcloud.com>
The following individuals have contributed code to this repository
(listed in alphabetical order):
* Bernerd Schaefer <bj.schaefer@gmail.com>
* Björn Rabenstein <beorn@soundcloud.com>
* Daniel Bornkessel <daniel@soundcloud.com>
* Jeff Younker <jeff@drinktomi.com>
* Julius Volz <julius@soundcloud.com>
* Matt T. Proud <matt.proud@gmail.com>
* Tobias Schmidt <ts@soundcloud.com>

View File

@ -1,13 +0,0 @@
The Prometheus project was started by Matt T. Proud (emeritus) and
Julius Volz in 2012.
Maintainers of this repository:
* Björn Rabenstein <beorn@soundcloud.com>
The following individuals have contributed code to this repository
(listed in alphabetical order):
* Björn Rabenstein <beorn@soundcloud.com>
* Matt T. Proud <matt.proud@gmail.com>
* Tobias Schmidt <ts@soundcloud.com>

View File

@ -1,11 +0,0 @@
Maintainers of this repository:
* Fabian Reinartz <fabian@soundcloud.com>
The following individuals have contributed code to this repository
(listed in alphabetical order):
* Björn Rabenstein <beorn@soundcloud.com>
* Fabian Reinartz <fabian@soundcloud.com>
* Julius Volz <julius@soundcloud.com>
* Miguel Molina <hi@mvader.me>

8
vendor/go4.org/AUTHORS generated vendored
View File

@ -1,8 +0,0 @@
# This is the official list of go4 authors for copyright purposes.
# This is distinct from the CONTRIBUTORS file, which is the list of
# people who have contributed, even if they don't own the copyright on
# their work.
Mathieu Lonjaret <mathieu.lonjaret@gmail.com>
Daniel Theophanes <kardianos@gmail.com>
Google

3
vendor/golang.org/x/crypto/AUTHORS generated vendored
View File

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View File

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

3
vendor/golang.org/x/exp/AUTHORS generated vendored
View File

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View File

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

3
vendor/golang.org/x/net/AUTHORS generated vendored
View File

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View File

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

3
vendor/golang.org/x/sys/AUTHORS generated vendored
View File

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View File

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

3
vendor/golang.org/x/text/AUTHORS generated vendored
View File

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View File

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

10
vendor/google.golang.org/api/AUTHORS generated vendored
View File

@ -1,10 +0,0 @@
# This is the official list of authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Google Inc.

View File

@ -1,50 +0,0 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# https://cla.developers.google.com/about/google-individual
# https://cla.developers.google.com/about/google-corporate
#
# The CLA can be filled out on the web:
#
# https://cla.developers.google.com/
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
# Names should be added to this file like so:
# Name <email address>
#
# An entry with two email addresses specifies that the
# first address should be used in the submit logs and
# that the second address should be recognized as the
# same person when interacting with Rietveld.
# Please keep the list sorted.
Alain Vongsouvanhalainv <alainv@google.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Eric Koleda <ekoleda+devrel@googlers.com>
Francesc Campoy <campoy@golang.org>
Garrick Evans <garrick@google.com>
Glenn Lewis <gmlewis@google.com>
Ivan Krasin <krasin@golang.org>
Jason Hall <jasonhall@google.com>
Johan Euphrosine <proppy@google.com>
Kostik Shtoyk <kostik@google.com>
Michael McGreevy <mcgreevy@golang.org>
Nick Craig-Wood <nickcw@gmail.com>
Scott Van Woudenberg <scottvw@google.com>
Takashi Matsuo <tmatsuo@google.com>

View File

@ -95,7 +95,7 @@ func NewCMServer() *CMServer {
ConcurrentGCSyncs: 20,
ClusterSigningCertFile: "/etc/kubernetes/ca/ca.pem",
ClusterSigningKeyFile: "/etc/kubernetes/ca/ca.key",
ReconcilerSyncLoopPeriod: unversioned.Duration{Duration: 5 * time.Second},
ReconcilerSyncLoopPeriod: unversioned.Duration{Duration: 60 * time.Second},
},
}
s.LeaderElection.LeaderElect = true

View File

@ -326,6 +326,13 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope RequestScope, forceWatch
return
}
trace.Step("Self-linking done")
// Ensure empty lists return a non-nil items slice
if numberOfItems == 0 && meta.IsListType(result) {
if err := meta.SetList(result, []runtime.Object{}); err != nil {
scope.err(err, res.ResponseWriter, req.Request)
return
}
}
write(http.StatusOK, scope.Kind.GroupVersion(), scope.Serializer, result, w, req.Request)
trace.Step(fmt.Sprintf("Writing http response done (%d items)", numberOfItems))
}

View File

@ -746,10 +746,11 @@ func (r *Request) Stream() (io.ReadCloser, error) {
defer resp.Body.Close()
result := r.transformResponse(resp, req)
if result.err != nil {
return nil, result.err
err := result.Error()
if err == nil {
err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body))
}
return nil, fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body))
return nil, err
}
}

View File

@ -18,6 +18,7 @@ go_library(
"aws_loadbalancer.go",
"aws_routes.go",
"aws_utils.go",
"device_allocator.go",
"log_handler.go",
"retry_handler.go",
"sets_ippermissions.go",
@ -52,6 +53,7 @@ go_test(
name = "go_default_test",
srcs = [
"aws_test.go",
"device_allocator_test.go",
"retry_handler_test.go",
],
library = "go_default_library",

View File

@ -365,6 +365,9 @@ type Cloud struct {
// and then get a second request before we attach the volume
attachingMutex sync.Mutex
attaching map[types.NodeName]map[mountDevice]awsVolumeID
// state of our device allocator for each node
deviceAllocators map[types.NodeName]DeviceAllocator
}
var _ Volumes = &Cloud{}
@ -797,6 +800,7 @@ func newAWSCloud(config io.Reader, awsServices Services) (*Cloud, error) {
region: regionName,
attaching: make(map[types.NodeName]map[mountDevice]awsVolumeID),
deviceAllocators: make(map[types.NodeName]DeviceAllocator),
}
selfAWSInstance, err := awsCloud.buildSelfAWSInstance()
@ -1167,16 +1171,16 @@ func (i *awsInstance) describeInstance() (*ec2.Instance, error) {
// Gets the mountDevice already assigned to the volume, or assigns an unused mountDevice.
// If the volume is already assigned, this will return the existing mountDevice with alreadyAttached=true.
// Otherwise the mountDevice is assigned by finding the first available mountDevice, and it is returned with alreadyAttached=false.
func (c *Cloud) getMountDevice(i *awsInstance, volumeID awsVolumeID, assign bool) (assigned mountDevice, alreadyAttached bool, err error) {
func (c *Cloud) getMountDevice(
i *awsInstance,
info *ec2.Instance,
volumeID awsVolumeID,
assign bool) (assigned mountDevice, alreadyAttached bool, err error) {
instanceType := i.getInstanceType()
if instanceType == nil {
return "", false, fmt.Errorf("could not get instance type for instance: %s", i.awsID)
}
info, err := i.describeInstance()
if err != nil {
return "", false, err
}
deviceMappings := map[mountDevice]awsVolumeID{}
for _, blockDevice := range info.BlockDeviceMappings {
name := aws.StringValue(blockDevice.DeviceName)
@ -1216,20 +1220,17 @@ func (c *Cloud) getMountDevice(i *awsInstance, volumeID awsVolumeID, assign bool
return mountDevice(""), false, nil
}
// Find the first unused device in sequence 'ba', 'bb', 'bc', ... 'bz', 'ca', ... 'zz'
var chosen mountDevice
for first := 'b'; first <= 'z' && chosen == ""; first++ {
for second := 'a'; second <= 'z' && chosen == ""; second++ {
candidate := mountDevice(fmt.Sprintf("%c%c", first, second))
if _, found := deviceMappings[candidate]; !found {
chosen = candidate
break
// Find the next unused device name
deviceAllocator := c.deviceAllocators[i.nodeName]
if deviceAllocator == nil {
// we want device names with two significant characters, starting with
// /dev/xvdba (leaving xvda - xvdz and xvdaa-xvdaz to the system)
deviceAllocator = NewDeviceAllocator(2, "ba")
c.deviceAllocators[i.nodeName] = deviceAllocator
}
}
}
if chosen == "" {
glog.Warningf("Could not assign a mount device (all in use?). mappings=%v", deviceMappings)
chosen, err := deviceAllocator.GetNext(deviceMappings)
if err != nil {
glog.Warningf("Could not assign a mount device. mappings=%v, error: %v", deviceMappings, err)
return "", false, fmt.Errorf("Too many EBS volumes attached to node %s.", i.nodeName)
}
@ -1437,7 +1438,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName,
return "", err
}
awsInstance, err := c.getAwsInstance(nodeName)
awsInstance, info, err := c.getFullInstance(nodeName)
if err != nil {
return "", fmt.Errorf("error finding instance %s: %v", nodeName, err)
}
@ -1464,7 +1465,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName,
}
}()
mountDevice, alreadyAttached, err = c.getMountDevice(awsInstance, disk.awsID, true)
mountDevice, alreadyAttached, err = c.getMountDevice(awsInstance, info, disk.awsID, true)
if err != nil {
return "", err
}
@ -1524,7 +1525,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName)
return "", err
}
awsInstance, err := c.getAwsInstance(nodeName)
awsInstance, info, err := c.getFullInstance(nodeName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// If instance no longer exists, safe to assume volume is not attached.
@ -1538,7 +1539,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName)
return "", err
}
mountDevice, alreadyAttached, err := c.getMountDevice(awsInstance, disk.awsID, false)
mountDevice, alreadyAttached, err := c.getMountDevice(awsInstance, info, disk.awsID, false)
if err != nil {
return "", err
}
@ -1722,7 +1723,7 @@ func (c *Cloud) GetDiskPath(volumeName KubernetesVolumeID) (string, error) {
// DiskIsAttached implements Volumes.DiskIsAttached
func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
awsInstance, err := c.getAwsInstance(nodeName)
_, instance, err := c.getFullInstance(nodeName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// If instance no longer exists, safe to assume volume is not attached.
@ -1741,11 +1742,7 @@ func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeN
return false, fmt.Errorf("error mapping volume spec %q to aws id: %v", diskName, err)
}
info, err := awsInstance.describeInstance()
if err != nil {
return false, err
}
for _, blockDevice := range info.BlockDeviceMappings {
for _, blockDevice := range instance.BlockDeviceMappings {
id := awsVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId))
if id == diskID {
return true, nil
@ -1765,7 +1762,7 @@ func (c *Cloud) DisksAreAttached(diskNames []KubernetesVolumeID, nodeName types.
idToDiskName[volumeID] = diskName
attached[diskName] = false
}
awsInstance, err := c.getAwsInstance(nodeName)
_, instance, err := c.getFullInstance(nodeName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// If instance no longer exists, safe to assume volume is not attached.
@ -1778,11 +1775,7 @@ func (c *Cloud) DisksAreAttached(diskNames []KubernetesVolumeID, nodeName types.
return attached, err
}
info, err := awsInstance.describeInstance()
if err != nil {
return attached, err
}
for _, blockDevice := range info.BlockDeviceMappings {
for _, blockDevice := range instance.BlockDeviceMappings {
volumeID := awsVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId))
diskName, found := idToDiskName[volumeID]
if found {
@ -3123,7 +3116,7 @@ func (c *Cloud) getInstanceByID(instanceID string) (*ec2.Instance, error) {
}
if len(instances) == 0 {
return nil, fmt.Errorf("no instances found for instance: %s", instanceID)
return nil, cloudprovider.InstanceNotFound
}
if len(instances) > 1 {
return nil, fmt.Errorf("multiple instances found for instance: %s", instanceID)
@ -3254,6 +3247,19 @@ func (c *Cloud) getInstanceByNodeName(nodeName types.NodeName) (*ec2.Instance, e
return instance, err
}
// getFullInstance resolves a node name into both the awsInstance wrapper and
// the freshly described EC2 instance record. The empty node name refers to
// the instance this process itself is running on (selfAWSInstance).
func (c *Cloud) getFullInstance(nodeName types.NodeName) (*awsInstance, *ec2.Instance, error) {
	if nodeName == "" {
		// Self-lookup: reuse the cached wrapper, but re-describe the
		// instance so block-device mappings are current.
		instance, err := c.getInstanceByID(c.selfAWSInstance.awsID)
		return c.selfAWSInstance, instance, err
	}
	instance, err := c.getInstanceByNodeName(nodeName)
	if err != nil {
		return nil, nil, err
	}
	return newAWSInstance(c.ec2, instance), instance, nil
}
// Add additional filters, to match on our tags
// This lets us run multiple k8s clusters in a single EC2 AZ
func (c *Cloud) addFilters(filters []*ec2.Filter) []*ec2.Filter {

View File

@ -0,0 +1,95 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import "fmt"
// ExistingDevices is a map of assigned devices. Presence of a key with a device
// name in the map means that the device is allocated. The value is irrelevant
// and can be used for anything that the DeviceAllocator user wants.
// Only the relevant part of the device name should be in the map, e.g. "ba"
// for "/dev/xvdba".
type ExistingDevices map[mountDevice]awsVolumeID
// On AWS, we should assign new (not yet used) device names to attached volumes.
// If we reuse a previously used name, we may get the volume "attaching" forever,
// see https://aws.amazon.com/premiumsupport/knowledge-center/ebs-stuck-attaching/.
// DeviceAllocator finds an available device name, taking into account already
// assigned device names from the ExistingDevices map. It tries to find the next
// device name after the previously assigned one (from the previous
// DeviceAllocator call), so all available device names are used eventually and
// device name reuse is minimized.
// All these allocations are in-memory; nothing is written to / read from the
// /dev directory.
type DeviceAllocator interface {
	// GetNext returns a free device name or an error when there is no free
	// device name. Only the device suffix is returned, e.g. "ba" for
	// "/dev/xvdba". It's up to the caller to add the appropriate "/dev/sd"
	// or "/dev/xvd" prefix.
	GetNext(existingDevices ExistingDevices) (mountDevice, error)
}
// deviceAllocator implements DeviceAllocator by remembering the last assigned
// device suffix and scanning forward from it on the next call.
// NOTE(review): there is no internal locking here — callers appear to
// serialize access through the owning Cloud object; confirm before sharing
// one allocator across goroutines.
type deviceAllocator struct {
	// firstDevice is the lowest suffix that may be handed out; suffixes
	// sorting before it are left to the operating system.
	firstDevice mountDevice
	// lastAssignedDevice is where the next GetNext scan starts from.
	lastAssignedDevice mountDevice
	// length is the number of characters in every generated suffix.
	length int
}
// NewDeviceAllocator creates a DeviceAllocator that allocates device suffixes
// of the given length ("aaa" for length 3) starting at firstDevice, so all
// suffixes that sort before firstDevice are left to the operating system.
// With length 2 and firstDevice "ba" it will allocate
// ba, bb, ..., bz, ca, ..., cz, ..., zz, so a..z and aa..az can be used by
// the operating system.
func NewDeviceAllocator(length int, firstDevice mountDevice) DeviceAllocator {
	// Seed lastAssignedDevice with the maximum suffix ("zz" for length 2) so
	// that the very first GetNext call wraps around and begins at firstDevice.
	all := make([]byte, length)
	for i := range all {
		all[i] = 'z'
	}
	return &deviceAllocator{
		firstDevice:        firstDevice,
		lastAssignedDevice: mountDevice(all),
		length:             length,
	}
}
// GetNext returns the next device suffix not present in existingDevices,
// scanning forward from the previously assigned suffix and wrapping around.
// It returns an error once every candidate in the sequence has been tried.
func (d *deviceAllocator) GetNext(existingDevices ExistingDevices) (mountDevice, error) {
	for next := d.nextDevice(d.lastAssignedDevice); ; next = d.nextDevice(next) {
		if _, used := existingDevices[next]; !used {
			d.lastAssignedDevice = next
			return next, nil
		}
		if next == d.lastAssignedDevice {
			// We walked the whole sequence and arrived back where we
			// started: every device name is taken.
			return "", fmt.Errorf("no devices are available")
		}
	}
}
// nextDevice returns the suffix immediately after device in the base-26
// ('a'..'z') sequence, wrapping to firstDevice after the all-'z' suffix.
func (d *deviceAllocator) nextDevice(device mountDevice) mountDevice {
	suffix := []byte(device)
	// Carry from the rightmost position: every trailing 'z' rolls over
	// to 'a', and the first non-'z' character is incremented.
	i := d.length - 1
	for i >= 0 && suffix[i] == 'z' {
		suffix[i] = 'a'
		i--
	}
	if i < 0 {
		// Every position overflowed; restart the cycle at the first device.
		return d.firstDevice
	}
	suffix[i]++
	return mountDevice(suffix)
}

View File

@ -27,6 +27,7 @@ import (
"github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/ghodss/yaml"
"time"
)
// CloudProviderName is the value used for the --cloud-provider flag
@ -128,6 +129,7 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
az.VirtualMachinesClient = compute.NewVirtualMachinesClient(az.SubscriptionID)
az.VirtualMachinesClient.BaseURI = az.Environment.ResourceManagerEndpoint
az.VirtualMachinesClient.Authorizer = servicePrincipalToken
az.VirtualMachinesClient.PollingDelay = 5 * time.Second
az.PublicIPAddressesClient = network.NewPublicIPAddressesClient(az.SubscriptionID)
az.PublicIPAddressesClient.BaseURI = az.Environment.ResourceManagerEndpoint

View File

@ -83,6 +83,10 @@ func (az *Cloud) EnsureLoadBalancer(clusterName string, service *api.Service, no
}
if sgNeedsUpdate {
glog.V(3).Infof("ensure(%s): sg(%s) - updating", serviceName, *sg.Name)
// azure-sdk-for-go introduced constraint validation which breaks the updating here if we don't set these
// to nil. This is a workaround until https://github.com/Azure/go-autorest/issues/112 is fixed
sg.SecurityGroupPropertiesFormat.NetworkInterfaces = nil
sg.SecurityGroupPropertiesFormat.Subnets = nil
_, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *sg.Name, sg, nil)
if err != nil {
return nil, err
@ -200,6 +204,10 @@ func (az *Cloud) EnsureLoadBalancerDeleted(clusterName string, service *api.Serv
}
if sgNeedsUpdate {
glog.V(3).Infof("delete(%s): sg(%s) - updating", serviceName, az.SecurityGroupName)
// azure-sdk-for-go introduced constraint validation which breaks the updating here if we don't set these
// to nil. This is a workaround until https://github.com/Azure/go-autorest/issues/112 is fixed
sg.SecurityGroupPropertiesFormat.NetworkInterfaces = nil
sg.SecurityGroupPropertiesFormat.Subnets = nil
_, err := az.SecurityGroupsClient.CreateOrUpdate(az.ResourceGroup, *reconciledSg.Name, reconciledSg, nil)
if err != nil {
return err

View File

@ -20,7 +20,7 @@ import (
"errors"
"fmt"
"io"
"net"
"io/ioutil"
"net/url"
"path"
"path/filepath"
@ -100,8 +100,6 @@ type VSphere struct {
cfg *VSphereConfig
// InstanceID of the server where this VSphere object is instantiated.
localInstanceID string
// Cluster that VirtualMachine belongs to
clusterName string
}
type VSphereConfig struct {
@ -124,12 +122,17 @@ type VSphereConfig struct {
WorkingDir string `gcfg:"working-dir"`
// Soap round tripper count (retries = RoundTripper - 1)
RoundTripperCount uint `gcfg:"soap-roundtrip-count"`
// VMUUID is the VM Instance UUID of virtual machine which can be retrieved from instanceUuid
// property in VmConfigInfo, or also set as vc.uuid in VMX file.
// If not set, will be fetched from the machine via sysfs (requires root)
VMUUID string `gcfg:"vm-uuid"`
}
Network struct {
// PublicNetwork is name of the network the VMs are joined to.
PublicNetwork string `gcfg:"public-network"`
}
Disk struct {
// SCSIControllerType defines SCSI controller to be used.
SCSIControllerType string `dcfg:"scsicontrollertype"`
@ -201,17 +204,29 @@ func init() {
})
}
// Returns the name of the VM and its Cluster on which this code is running.
// This is done by searching for the name of virtual machine by current IP.
// Returns the name of the VM on which this code is running.
// Prerequisite: this code assumes VMWare vmtools or open-vm-tools to be installed in the VM.
func readInstance(client *govmomi.Client, cfg *VSphereConfig) (string, string, error) {
addrs, err := net.InterfaceAddrs()
// Will attempt to determine the machine's name via it's UUID in this precedence order, failing if neither have a UUID:
// * cloud config value VMUUID
// * sysfs entry
func getVMName(client *govmomi.Client, cfg *VSphereConfig) (string, error) {
var vmUUID string
if cfg.Global.VMUUID != "" {
vmUUID = cfg.Global.VMUUID
} else {
// This needs root privileges on the host, and will fail otherwise.
vmUUIDbytes, err := ioutil.ReadFile("/sys/devices/virtual/dmi/id/product_uuid")
if err != nil {
return "", "", err
return "", err
}
if len(addrs) == 0 {
return "", "", fmt.Errorf("unable to retrieve Instance ID")
vmUUID = string(vmUUIDbytes)
cfg.Global.VMUUID = vmUUID
}
if vmUUID == "" {
return "", fmt.Errorf("unable to determine machine ID from cloud configuration or sysfs")
}
// Create context
@ -224,58 +239,29 @@ func readInstance(client *govmomi.Client, cfg *VSphereConfig) (string, string, e
// Fetch and set data center
dc, err := f.Datacenter(ctx, cfg.Global.Datacenter)
if err != nil {
return "", "", err
return "", err
}
f.SetDatacenter(dc)
s := object.NewSearchIndex(client.Client)
var svm object.Reference
for _, v := range addrs {
ip, _, err := net.ParseCIDR(v.String())
svm, err := s.FindByUuid(ctx, dc, strings.ToLower(strings.TrimSpace(vmUUID)), true, nil)
if err != nil {
return "", "", fmt.Errorf("unable to parse cidr from ip")
}
// Finds a virtual machine or host by IP address.
svm, err = s.FindByIp(ctx, dc, ip.String(), true)
if err == nil && svm != nil {
break
}
}
if svm == nil {
return "", "", fmt.Errorf("unable to retrieve vm reference from vSphere")
return "", err
}
var vm mo.VirtualMachine
err = s.Properties(ctx, svm.Reference(), []string{"name", "resourcePool"}, &vm)
err = s.Properties(ctx, svm.Reference(), []string{"name"}, &vm)
if err != nil {
return "", "", err
return "", err
}
var cluster string
if vm.ResourcePool != nil {
// Extract the Cluster Name if VM belongs to a ResourcePool
var rp mo.ResourcePool
err = s.Properties(ctx, *vm.ResourcePool, []string{"parent"}, &rp)
if err == nil {
var ccr mo.ComputeResource
err = s.Properties(ctx, *rp.Parent, []string{"name"}, &ccr)
if err == nil {
cluster = ccr.Name
} else {
glog.Warningf("VM %s, does not belong to a vSphere Cluster, will not have FailureDomain label", vm.Name)
}
} else {
glog.Warningf("VM %s, does not belong to a vSphere Cluster, will not have FailureDomain label", vm.Name)
}
}
return vm.Name, cluster, nil
return vm.Name, nil
}
func newVSphere(cfg VSphereConfig) (*VSphere, error) {
if cfg.Disk.SCSIControllerType == "" {
cfg.Disk.SCSIControllerType = LSILogicSASControllerType
cfg.Disk.SCSIControllerType = PVSCSIControllerType
} else if !checkControllerSupported(cfg.Disk.SCSIControllerType) {
glog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType)
return nil, errors.New("Controller type not supported. Please configure 'lsilogic-sas' OR 'pvscsi'")
@ -292,7 +278,7 @@ func newVSphere(cfg VSphereConfig) (*VSphere, error) {
return nil, err
}
id, cluster, err := readInstance(c, &cfg)
id, err := getVMName(c, &cfg)
if err != nil {
return nil, err
}
@ -301,7 +287,6 @@ func newVSphere(cfg VSphereConfig) (*VSphere, error) {
client: c,
cfg: &cfg,
localInstanceID: id,
clusterName: cluster,
}
runtime.SetFinalizer(&vs, logout)
@ -637,20 +622,9 @@ func (vs *VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
// Zones returns an implementation of Zones for Google vSphere.
func (vs *VSphere) Zones() (cloudprovider.Zones, bool) {
glog.V(1).Info("Claiming to support Zones")
glog.V(1).Info("The vSphere cloud provider does not support zones")
return vs, true
}
func (vs *VSphere) GetZone() (cloudprovider.Zone, error) {
glog.V(1).Infof("Current datacenter is %v, cluster is %v", vs.cfg.Global.Datacenter, vs.clusterName)
// The clusterName is determined from the VirtualMachine ManagedObjectReference during init
// If the VM is not created within a Cluster, this will return empty-string
return cloudprovider.Zone{
Region: vs.cfg.Global.Datacenter,
FailureDomain: vs.clusterName,
}, nil
return nil, false
}
// Routes returns a false since the interface is not supported for vSphere.
@ -957,11 +931,12 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b
}
if !nodeExist {
glog.Warningf(
"Node %q does not exist. DiskIsAttached will assume vmdk %q is not attached to it.",
vSphereInstance,
volPath)
return false, nil
glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached: node %q does not exist",
volPath,
vSphereInstance)
return false, fmt.Errorf("DiskIsAttached failed to determine whether disk %q is still attached: node %q does not exist",
volPath,
vSphereInstance)
}
// Get VM device list
@ -1008,11 +983,12 @@ func (vs *VSphere) DisksAreAttached(volPaths []string, nodeName k8stypes.NodeNam
}
if !nodeExist {
glog.Warningf(
"Node %q does not exist. DisksAreAttached will assume vmdk %v are not attached to it.",
vSphereInstance,
volPaths)
return attached, nil
glog.Errorf("DisksAreAttached failed to determine whether disks %v are still attached: node %q does not exist",
volPaths,
vSphereInstance)
return attached, fmt.Errorf("DisksAreAttached failed to determine whether disks %v are still attached: node %q does not exist",
volPaths,
vSphereInstance)
}
// Get VM device list
@ -1178,21 +1154,6 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error
vSphereInstance = nodeNameToVMName(nodeName)
}
nodeExist, err := vs.NodeExists(vs.client, nodeName)
if err != nil {
glog.Errorf("Failed to check whether node exist. err: %s.", err)
return err
}
if !nodeExist {
glog.Warningf(
"Node %q does not exist. DetachDisk will assume vmdk %q is not attached to it.",
nodeName,
volPath)
return nil
}
vm, vmDevices, _, dc, err := getVirtualMachineDevices(vs.cfg, ctx, vs.client, vSphereInstance)
if err != nil {
return err
@ -1319,6 +1280,9 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
// Create a virtual disk manager
virtualDiskManager := object.NewVirtualDiskManager(vs.client.Client)
if filepath.Ext(vmDiskPath) != ".vmdk" {
vmDiskPath += ".vmdk"
}
// Delete virtual disk
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, vmDiskPath, dc)
if err != nil {

View File

@ -800,9 +800,10 @@ func IsRollingUpdate(deployment *extensions.Deployment) bool {
}
// DeploymentComplete considers a deployment complete once its desired replica
// count equals both its updated and total replica counts (i.e. no old pods
// remain) and it does not violate minimum availability.
func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
	desired := deployment.Spec.Replicas
	switch {
	case newStatus.UpdatedReplicas != desired:
		return false
	case newStatus.Replicas != desired:
		return false
	case newStatus.AvailableReplicas < desired-MaxUnavailable(*deployment):
		return false
	}
	return true
}

View File

@ -19,6 +19,7 @@ go_library(
"//pkg/api/unversioned:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/policy:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library",
@ -47,6 +48,7 @@ go_test(
"//pkg/apimachinery/registered:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/policy:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/controller:go_default_library",

View File

@ -23,6 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/client/cache"
@ -50,7 +51,7 @@ const statusUpdateRetries = 2
// all and the corresponding entry can be removed from pdb.Status.DisruptedPods. It is assumed that
// pod/pdb apiserver to controller latency is relatively small (like 1-2sec) so the below value should
// be more than enough.
// If the cotroller is running on a different node it is important that the two nodes have synced
// If the controller is running on a different node it is important that the two nodes have synced
// clock (via ntp for example). Otherwise PodDisruptionBudget controller may not provide enough
// protection against unwanted pod disruptions.
const DeletionTimeout = 2 * 60 * time.Second
@ -79,6 +80,10 @@ type DisruptionController struct {
dController *cache.Controller
dLister cache.StoreToDeploymentLister
ssStore cache.Store
ssController *cache.Controller
ssLister cache.StoreToStatefulSetLister
// PodDisruptionBudget keys that need to be synced.
queue workqueue.RateLimitingInterface
recheckQueue workqueue.DelayingInterface
@ -186,9 +191,23 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i
cache.ResourceEventHandlerFuncs{},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
dc.dLister.Indexer = dc.dIndexer
dc.ssStore, dc.ssController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Apps().StatefulSets(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Apps().StatefulSets(api.NamespaceAll).Watch(options)
},
},
&apps.StatefulSet{},
30*time.Second,
cache.ResourceEventHandlerFuncs{},
)
dc.ssLister.Store = dc.ssStore
return dc
}
@ -200,7 +219,8 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i
// and we may well need further tweaks just to be able to access scale
// subresources.
func (dc *DisruptionController) finders() []podControllerFinder {
return []podControllerFinder{dc.getPodReplicationControllers, dc.getPodDeployments, dc.getPodReplicaSets}
return []podControllerFinder{dc.getPodReplicationControllers, dc.getPodDeployments, dc.getPodReplicaSets,
dc.getPodStatefulSets}
}
// getPodReplicaSets finds replicasets which have no matching deployments.
@ -230,6 +250,29 @@ func (dc *DisruptionController) getPodReplicaSets(pod *api.Pod) ([]controllerAnd
return cas, nil
}
// getPodStatefulSets returns the statefulsets managing the given pod, paired
// with their scale (spec.Replicas). A failed lookup is reported as an empty
// result rather than an error.
func (dc *DisruptionController) getPodStatefulSets(pod *api.Pod) ([]controllerAndScale, error) {
	sets, err := dc.ssLister.GetPodStatefulSets(pod)
	if err != nil {
		// GetPodStatefulSets returns an error only when no StatefulSets
		// match; treat that as "no controllers" instead of failing.
		return []controllerAndScale{}, nil
	}

	// Deduplicate by UID before building the result.
	scaleByUID := map[types.UID]int32{}
	for _, s := range sets {
		scaleByUID[s.UID] = s.Spec.Replicas
	}

	cas := make([]controllerAndScale, 0, len(scaleByUID))
	for uid, scale := range scaleByUID {
		cas = append(cas, controllerAndScale{UID: uid, scale: scale})
	}
	return cas, nil
}
// getPodDeployments finds deployments for any replicasets which are being managed by deployments.
func (dc *DisruptionController) getPodDeployments(pod *api.Pod) ([]controllerAndScale, error) {
cas := []controllerAndScale{}
@ -283,6 +326,7 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
go dc.rcController.Run(stopCh)
go dc.rsController.Run(stopCh)
go dc.dController.Run(stopCh)
go dc.ssController.Run(stopCh)
go wait.Until(dc.worker, time.Second, stopCh)
go wait.Until(dc.recheckWorker, time.Second, stopCh)

View File

@ -58,9 +58,13 @@ const (
// An annotation on the Service denoting if the endpoints controller should
// go ahead and create endpoints for unready pods. This annotation is
// currently only used by StatefulSets, where we need the pod to be DNS
// resolvable during initialization. In this situation we create a headless
// service just for the StatefulSet, and clients shouldn't be using this Service
// for anything so unready endpoints don't matter.
// resolvable during initialization and termination. In this situation we
// create a headless Service just for the StatefulSet, and clients shouldn't
// be using this Service for anything so unready endpoints don't matter.
// Endpoints of these Services retain their DNS records and continue
// receiving traffic for the Service from the moment the kubelet starts all
// containers in the pod and marks it "Running", till the kubelet stops all
// containers and deletes the pod from the apiserver.
TolerateUnreadyEndpointsAnnotation = "service.alpha.kubernetes.io/tolerate-unready-endpoints"
)
@ -402,7 +406,7 @@ func (e *EndpointController) syncService(key string) error {
glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
continue
}
if pod.DeletionTimestamp != nil {
if !tolerateUnreadyEndpoints && pod.DeletionTimestamp != nil {
glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
continue
}

View File

@ -936,8 +936,7 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *api.PersistentVolu
glog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name)
opName := fmt.Sprintf("delete-%s[%s]", volume.Name, string(volume.UID))
ctrl.scheduleOperation(opName, func() error {
ctrl.deleteVolumeOperation(volume)
return nil
return ctrl.deleteVolumeOperation(volume)
})
default:
@ -1042,12 +1041,13 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})
// deleteVolumeOperation deletes a volume. This method is running in standalone
// goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) {
func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) error {
volume, ok := arg.(*api.PersistentVolume)
if !ok {
glog.Errorf("Cannot convert deleteVolumeOperation argument to volume, got %#v", arg)
return
return nil
}
glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name)
// This method may have been waiting for a volume lock for some time.
@ -1056,16 +1056,16 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) {
newVolume, err := ctrl.kubeClient.Core().PersistentVolumes().Get(volume.Name)
if err != nil {
glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err)
return
return nil
}
needsReclaim, err := ctrl.isVolumeReleased(newVolume)
if err != nil {
glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err)
return
return nil
}
if !needsReclaim {
glog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name)
return
return nil
}
deleted, err := ctrl.doDeleteVolume(volume)
@ -1079,20 +1079,20 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) {
} else {
// The plugin failed, mark the volume as Failed and send Warning
// event
if _, err = ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedDelete", err.Error()); err != nil {
if _, err := ctrl.updateVolumePhaseWithEvent(volume, api.VolumeFailed, api.EventTypeWarning, "VolumeFailedDelete", err.Error()); err != nil {
glog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err)
// Save failed, retry on the next deletion attempt
return
return err
}
}
// Despite the volume being Failed, the controller will retry deleting
// the volume in every syncVolume() call.
return
return err
}
if !deleted {
// The volume waits for deletion by an external plugin. Do nothing.
return
return nil
}
glog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name)
@ -1103,9 +1103,9 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) {
// cache of "recently deleted volumes" and avoid unnecessary deletion,
// this is left out as future optimization.
glog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err)
return
return nil
}
return
return nil
}
// isVolumeReleased returns true if given volume is released and can be recycled

View File

@ -71,7 +71,7 @@ func NewController(p ControllerParameters) *PersistentVolumeController {
claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
kubeClient: p.KubeClient,
eventRecorder: eventRecorder,
runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */),
runningOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
cloud: p.Cloud,
enableDynamicProvisioning: p.EnableDynamicProvisioning,
clusterName: p.ClusterName,

View File

@ -38,6 +38,7 @@ var AWSRegions = [...]string{
"us-west-1",
"us-west-2",
"eu-west-1",
"eu-west-2",
"eu-central-1",
"ap-south-1",
"ap-southeast-1",

View File

@ -17,7 +17,6 @@ limitations under the License.
package azure
import (
"fmt"
"io/ioutil"
"time"
@ -133,12 +132,17 @@ func (a *acrProvider) Provide() credentialprovider.DockerConfig {
return cfg
}
for ix := range *res.Value {
// TODO: I don't think this will work for national clouds
cfg[fmt.Sprintf("%s.azurecr.io", *(*res.Value)[ix].Name)] = entry
loginServer := getLoginServer((*res.Value)[ix])
glog.V(4).Infof("Adding Azure Container Registry docker credential for %s", loginServer)
cfg[loginServer] = entry
}
return cfg
}
// getLoginServer extracts the registry's login server hostname from the
// Azure container registry API response. Go selectors auto-dereference the
// pointer field, so only the final *string needs an explicit dereference.
func getLoginServer(registry containerregistry.Registry) string {
	return *registry.RegistryProperties.LoginServer
}
func (a *acrProvider) LazyProvide() *credentialprovider.DockerConfigEntry {
return nil
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package genericapiserver
import (
"crypto/x509"
"fmt"
"io"
"net"
@ -165,7 +166,7 @@ type SecureServingInfo struct {
// SNICerts are named CertKeys for serving secure traffic with SNI support.
SNICerts []NamedCertKey
// ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates
ClientCA string
ClientCA *x509.CertPool
}
type CertKey struct {
@ -254,7 +255,6 @@ func (c *Config) ApplyOptions(options *options.ServerRunOptions) *Config {
},
},
SNICerts: []NamedCertKey{},
ClientCA: options.ClientCAFile,
}
if options.TLSCertFile == "" && options.TLSPrivateKeyFile == "" {
secureServingInfo.ServerCert.Generate = true
@ -262,6 +262,35 @@ func (c *Config) ApplyOptions(options *options.ServerRunOptions) *Config {
secureServingInfo.ServerCert.KeyFile = path.Join(options.CertDirectory, "apiserver.key")
}
if len(options.ClientCAFile) > 0 {
clientCAs, err := certutil.CertsFromFile(options.ClientCAFile)
if err != nil {
// normally this would be no-no, but its the minimal change to backport to 1.5 and
// every caller is going to do this.
panic(fmt.Errorf("unable to load client CA file: %v", err))
}
if secureServingInfo.ClientCA == nil {
secureServingInfo.ClientCA = x509.NewCertPool()
}
for _, cert := range clientCAs {
secureServingInfo.ClientCA.AddCert(cert)
}
}
if len(options.RequestHeaderClientCAFile) > 0 {
clientCAs, err := certutil.CertsFromFile(options.RequestHeaderClientCAFile)
if err != nil {
// normally this would be no-no, but its the minimal change to backport to 1.5 and
// every caller is going to do this.
panic(fmt.Errorf("unable to load requestheader client CA file: %v", err))
}
if secureServingInfo.ClientCA == nil {
secureServingInfo.ClientCA = x509.NewCertPool()
}
for _, cert := range clientCAs {
secureServingInfo.ClientCA.AddCert(cert)
}
}
secureServingInfo.SNICerts = nil
for _, nkc := range options.SNICertKeys {
secureServingInfo.SNICerts = append(secureServingInfo.SNICerts, NamedCertKey{

View File

@ -26,7 +26,6 @@ import (
"sync"
"time"
certutil "k8s.io/kubernetes/pkg/util/cert"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/validation"
@ -78,16 +77,12 @@ func (s *GenericAPIServer) serveSecurely(stopCh <-chan struct{}) error {
secureServer.TLSConfig.Certificates = append(secureServer.TLSConfig.Certificates, *c)
}
if len(s.SecureServingInfo.ClientCA) > 0 {
clientCAs, err := certutil.NewPool(s.SecureServingInfo.ClientCA)
if err != nil {
return fmt.Errorf("unable to load client CA file: %v", err)
}
if s.SecureServingInfo.ClientCA != nil {
// Populate PeerCertificates in requests, but don't reject connections without certificates
// This allows certificates to be validated by authenticators, while still allowing other auth types
secureServer.TLSConfig.ClientAuth = tls.RequestClientCert
// Specify allowed CAs for client certificates
secureServer.TLSConfig.ClientCAs = clientCAs
secureServer.TLSConfig.ClientCAs = s.SecureServingInfo.ClientCA
}
glog.Infof("Serving securely on %s", s.SecureServingInfo.BindAddress)

View File

@ -21,6 +21,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/genericapiserver/options"
certutil "k8s.io/kubernetes/pkg/util/cert"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
)
@ -69,6 +70,22 @@ func verifySecureAndInsecurePort(options *options.ServerRunOptions) []error {
return errors
}
// verifyClientCAs validates that the configured client CA bundles, if any,
// can be read and parsed. It returns one error per unloadable file; an
// empty result means every configured bundle parsed successfully.
func verifyClientCAs(options *options.ServerRunOptions) []error {
	// Nil slice rather than []error{}: append and len work on nil, and the
	// name avoids shadowing the stdlib errors package.
	var errs []error
	if len(options.ClientCAFile) > 0 {
		// Parse eagerly so a bad bundle fails validation instead of
		// panicking later when the serving config is built.
		if _, err := certutil.CertsFromFile(options.ClientCAFile); err != nil {
			errs = append(errs, fmt.Errorf("unable to load client CA file: %v", err))
		}
	}
	if len(options.RequestHeaderClientCAFile) > 0 {
		if _, err := certutil.CertsFromFile(options.RequestHeaderClientCAFile); err != nil {
			errs = append(errs, fmt.Errorf("unable to load requestheader client CA file: %v", err))
		}
	}
	return errs
}
func ValidateRunOptions(options *options.ServerRunOptions) {
errors := []error{}
if errs := verifyClusterIPFlags(options); len(errs) > 0 {
@ -80,6 +97,9 @@ func ValidateRunOptions(options *options.ServerRunOptions) {
if errs := verifySecureAndInsecurePort(options); len(errs) > 0 {
errors = append(errors, errs...)
}
if errs := verifyClientCAs(options); len(errs) > 0 {
errors = append(errors, errs...)
}
if err := utilerrors.NewAggregate(errors); err != nil {
glog.Fatalf("Validate server run options failed: %v", err)
}

View File

@ -113,6 +113,7 @@ go_library(
"//pkg/util/wait:go_default_library",
"//pkg/version:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",

View File

@ -141,7 +141,7 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeApi
if rOpts != nil {
hc.Resources = dockercontainer.Resources{
Memory: rOpts.GetMemoryLimitInBytes(),
MemorySwap: -1,
MemorySwap: dockertools.DefaultMemorySwap(),
CPUShares: rOpts.GetCpuShares(),
CPUQuota: rOpts.GetCpuQuota(),
CPUPeriod: rOpts.GetCpuPeriod(),

View File

@ -26,6 +26,7 @@ import (
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/kubelet/types"
)
@ -370,7 +371,7 @@ func sharesHostNetwork(container *dockertypes.ContainerJSON) bool {
func setSandboxResources(hc *dockercontainer.HostConfig) {
hc.Resources = dockercontainer.Resources{
MemorySwap: -1,
MemorySwap: dockertools.DefaultMemorySwap(),
CPUShares: defaultSandboxCPUshares,
// Use docker's default cpu quota/period.
}

Some files were not shown because too many files have changed in this diff Show More